.classpath | 5 modules/common/cassandra-ccm/cassandra-ccm-arquillian/src/main/java/org/rhq/cassandra/ccm/arquillian/CCMSuiteDeploymentExtension.java | 77 + modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java | 65 - modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/ClusterInitService.java | 101 + modules/common/cassandra-ccm/cassandra-ccm-maven-plugin/src/main/java/org/rhq/cassandra/ccm/maven/DeployMojo.java | 6 modules/common/cassandra-ccm/cassandra-ccm-testng/src/main/java/org/rhq/cassandra/CCMTestNGListener.java | 17 modules/common/cassandra-schema/pom.xml | 21 modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/AbstractManager.java | 27 modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/SchemaManager.java | 72 - modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/TopologyManager.java | 48 modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/VersionManager.java | 6 modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java | 41 modules/common/cassandra-util/src/test/java/org/rhq/cassandra/util/ConfigEditorTest.java | 47 modules/common/drift/pom.xml | 22 modules/common/filetemplate-bundle/pom.xml | 6 modules/common/jboss-as/pom.xml | 7 modules/common/pom.xml | 6 modules/core/dbutils/pom.xml | 5 modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml | 2 modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageClusterSettings.java | 56 modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java | 25 modules/core/domain/src/main/java/org/rhq/core/domain/measurement/MeasurementBaseline.java | 16 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/ClusterConfigurationEditor.java | 238 ++++ 
modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java | 86 - modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java | 9 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasourceField.java | 2 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java | 170 +- modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java | 294 +++- modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java | 10 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceDetailView.java | 7 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceTitleBar.java | 21 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java | 28 modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java | 42 modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/InstallerServiceImpl.java | 27 modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/storage/FakeStorageClusterSettingsManagerBean.java | 1 modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml | 14 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java | 63 - modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java | 2 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerRemote.java | 2 
modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/measurement/MeasurementBaselineManagerBean.java | 68 - modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/measurement/MeasurementBaselineManagerLocal.java | 7 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java | 8 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterMonitor.java | 2 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettings.java | 56 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettingsManagerBean.java | 5 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettingsManagerLocal.java | 1 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java | 594 +++++++--- modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerLocal.java | 24 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/util/LookupUtil.java | 8 modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java | 3 modules/enterprise/server/plugins/yum/pom.xml | 16 modules/enterprise/server/plugins/yum/src/main/java/org/rhq/enterprise/server/plugins/yum/DiskReader.java | 60 - modules/enterprise/server/plugins/yum/src/main/java/org/rhq/enterprise/server/plugins/yum/HttpReader.java | 90 - modules/enterprise/server/plugins/yum/src/main/java/org/rhq/enterprise/server/plugins/yum/RepoProvider.java | 42 modules/enterprise/server/plugins/yum/src/main/java/org/rhq/enterprise/server/plugins/yum/UrlReader.java | 111 + modules/enterprise/server/plugins/yum/src/main/resources/META-INF/rhq-serverplugin.xml | 6 
modules/enterprise/server/plugins/yum/src/test/java/Acme/Serve/UrlReaderTestServer.java | 64 + modules/enterprise/server/plugins/yum/src/test/java/org/rhq/enterprise/server/plugins/yum/UrlReaderTest.java | 195 +++ modules/enterprise/server/plugins/yum/src/test/resources/test.file | 1 modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsBaselineCalculator.java | 21 modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsBaselineCalculatorTest.java | 48 modules/helpers/metrics-simulator/src/main/java/org/rhq/metrics/simulator/Simulator.java | 51 modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java | 130 +- modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml | 36 modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java | 6 65 files changed, 2450 insertions(+), 897 deletions(-)
New commits: commit e7f325aefa9da750a7e69ec01513b573a46481f1 Author: Jay Shaughnessy jshaughn@redhat.com Date: Tue Aug 20 16:05:20 2013 -0400
fix merge issue
diff --git a/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml b/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml index 2c48bbd..6adccba 100644 --- a/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml +++ b/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml @@ -1943,7 +1943,7 @@ </schemaSpec>
<schemaSpec version="2.125"> - <!-- Note, JON 3.1.2 includes this already but it is safe to re-execute --> + <!-- Note, JON 3.1.2 includes this already but it is safe to re-execute --> <!-- We were using a buggy version of password obfuscation from JBoss AS. This task sets the affected passwords to NULL. Fortunately, this currently affects only CSP content sources, so the damage is not huge from the user perspective. --> @@ -2211,26 +2211,6 @@ </schema-directSQL> </schemaSpec>
- <schemaSpec version="2.137"> - <schema-addColumn table="RHQ_STORAGE_NODE" column="MAINTENANCE_PENDING" columnType="BOOLEAN"/> - <schema-addColumn table="RHQ_STORAGE_NODE" column="ERROR_MSG" columnType="LONGVARCHAR"/> - <schema-addColumn table="RHQ_STORAGE_NODE" column="RESOURCE_OP_HIST_ID" columnType="INTEGER"/> - <schema-directSQL> - <statement desc="Creating RHQ_STORAGE_NODE foreign key to RHQ_OPERATION_HISTORY"> - ALTER TABLE RHQ_STORAGE_NODE - ADD CONSTRAINT RHQ_SN_OP_HIST_ID_FK - FOREIGN KEY (RESOURCE_OP_HIST_ID) - REFERENCES RHQ_OPERATION_HISTORY (ID) - </statement> - <statement targetDBVendor="postgresql" desc="Set maintenance_pending flag to false for existing storage nodes"> - UPDATE RHQ_STORAGE_NODE SET IGNORED = false - </statement> - <statement targetDBVendor="oracle" desc="Set maintenance_pending flag to false for existing storage nodes"> - UPDATE RHQ_STORAGE_NODE SET IGNORED = 0 - </statement> - </schema-directSQL> - </schemaSpec> - </dbupgrade> </target> </project>
commit f8127fd04d6e168794d1b6e5f22949b5e5f92b09 Author: Jay Shaughnessy jshaughn@redhat.com Date: Tue Aug 20 15:41:42 2013 -0400
add jsandas latest library additions
diff --git a/.classpath b/.classpath index 63f9170..6985799 100644 --- a/.classpath +++ b/.classpath @@ -392,5 +392,8 @@ <classpathentry kind="var" path="M2_REPO/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar"/> <classpathentry kind="var" path="M2_REPO/org/apache/maven/plugin-tools/maven-plugin-annotations/3.2/maven-plugin-annotations-3.2.jar"/> <classpathentry kind="var" path="M2_REPO/org/jboss/jboss-vfs/3.1.0.Final/jboss-vfs-3.1.0.Final.jar"/> + <classpathentry kind="var" path="M2_REPO/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar"/> + <classpathentry kind="var" path="M2_REPO/com/fasterxml/jackson/core/jackson-databind/2.1.1/jackson-databind-2.1.1.jar"/> + <classpathentry kind="var" path="M2_REPO/com/fasterxml/jackson/core/jackson-core/2.1.1/jackson-core-2.1.1.jar"/> <classpathentry kind="output" path="eclipse-classes"/> </classpath>
commit fb39c1968e72e7aa65146b2d27142abbe2001ab5 Author: Lukas Krejci lkrejci@redhat.com Date: Tue Aug 20 14:24:39 2013 +0200
[BZ 988881] - Removed missing i18n, cancel avail checking timers on logout.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceTitleBar.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceTitleBar.java index fbbf0a8..8fdaf6e 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceTitleBar.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceTitleBar.java @@ -395,9 +395,14 @@ public class ResourceTitleBar extends EnhancedVLayout { new AsyncCallback<List<ResourceError>>() { public void onFailure(Throwable caught) { pluginErrors.setVisible(false); - CoreGUI.getErrorHandler().handleError( - MSG.dataSource_resourceErrors_error_fetchFailure(String.valueOf(resourceComposite.getResource() - .getId())), caught); + + if (UserSessionManager.isLoggedOut()) { + resourceAvailAndErrorsRefreshTime.cancel(); + } else { + CoreGUI.getErrorHandler().handleError( + MSG.dataSource_resourceErrors_error_fetchFailure(String.valueOf(resourceComposite.getResource() + .getId())), caught); + }
if (latch != null) { latch.countDown(); @@ -427,9 +432,13 @@ public class ResourceTitleBar extends EnhancedVLayout { @Override public void onFailure(Throwable caught) { availabilityImage.setSrc(ImageManager.getAvailabilityLargeIconFromAvailType(currentAvail)); - CoreGUI.getErrorHandler().handleError("I18N: Failed to refresh the availability", caught); - //MSG.dataSource_resourceErrors_error_fetchFailure(String.valueOf(resourceComposite.getResource() - // .getId())), caught); + + if (UserSessionManager.isLoggedOut()) { + resourceAvailAndErrorsRefreshTime.cancel(); + } else { + CoreGUI.getErrorHandler().handleError(MSG.view_inventory_resource_loadFailed(String.valueOf(resource.getId())), caught); + } + if (latch != null) { latch.countDown(); } else {
commit 89f76f4574f4d592db77d2b23e2af2c48a3908c8 Author: John Sanda jsanda@redhat.com Date: Mon Aug 19 22:25:49 2013 -0400
update logic of updateSchemaIfNecessary to handle removal of nodes
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java index 99bd592..b511fe0 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java @@ -165,9 +165,8 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa storageNode = storageNodeOperationsHandler.setMode(storageNode, StorageNode.OperationMode.DECOMMISSION); List<StorageNode> storageNodes = entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, StorageNode.class).setParameter("operationMode", StorageNode.OperationMode.NORMAL).getResultList(); - storageNodes.add(storageNode);
- boolean runRepair = updateSchemaIfNecessary(storageNodes); + boolean runRepair = updateSchemaIfNecessary(storageNodes.size() + 1, storageNodes.size()); // This is a bit of a hack since the maintenancePending flag is really intended to // queue up storage nodes during cluster maintenance operations. storageNode.setMaintenancePending(runRepair); @@ -205,7 +204,6 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa
@Override public void performAddNodeMaintenance(Subject subject, StorageNode storageNode) { - storageNode.setOperationMode(StorageNode.OperationMode.ADD_MAINTENANCE); List<StorageNode> clusterNodes = entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, StorageNode.class).setParameter("operationMode", StorageNode.OperationMode.NORMAL) .getResultList(); @@ -214,7 +212,7 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa } storageNode.setMaintenancePending(true); clusterNodes.add(storageNode); - boolean runRepair = updateSchemaIfNecessary(clusterNodes); + boolean runRepair = updateSchemaIfNecessary(clusterNodes.size(), clusterNodes.size() + 1); performAddNodeMaintenance(subject, storageNode, runRepair, createPropertyListOfAddresses(SEEDS_LIST, clusterNodes)); } @@ -701,59 +699,92 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa resourceType.getPlugin().equals(STORAGE_NODE_PLUGIN_NAME); }
- private boolean updateSchemaIfNecessary(List<StorageNode> storageNodes) { - // The previous cluster size will be the current size - 1 since we currently only - // support deploying one node at a time. - int previousClusterSize = storageNodes.size() - 1; + private boolean updateSchemaIfNecessary(int previousClusterSize, int newClusterSize) { boolean isRepairNeeded; int replicationFactor = 1;
- if (previousClusterSize >= 4) { - // At 4 nodes we increase the RF to 3. We are not increasing the RF beyond - // that for additional nodes; so, there is no need to run repair if we are - // expanding from a 4 node cluster since the RF remains the same. + if (previousClusterSize == 0) { + throw new IllegalStateException("previousClusterSize cannot be 0"); + } + if (newClusterSize == 0) { + throw new IllegalStateException("newClusterSize cannot be 0"); + } + if (Math.abs(newClusterSize - previousClusterSize) != 1) { + throw new IllegalStateException("The absolute difference between previousClusterSize[" + + previousClusterSize + "] and newClusterSize[" + newClusterSize + "] must be 1"); + } + + if (newClusterSize == 1) { + isRepairNeeded = false; + replicationFactor = 1; + } else if (previousClusterSize > 4 && newClusterSize == 4) { isRepairNeeded = false; - } else if (previousClusterSize == 1) { - // The RF will increase since we are going from a single to a multi-node - // cluster; therefore, we want to run repair. + } else if (previousClusterSize == 4 && newClusterSize == 3) { isRepairNeeded = true; replicationFactor = 2; - } else if (previousClusterSize == 2) { - if (storageNodes.size() > 3) { - // If we go from 2 to > 3 nodes we will increase the RF to 3; therefore - // we want to run repair. - isRepairNeeded = true; - replicationFactor = 3; - } else { - // If we go from 2 to 3 nodes, we keep the RF at 2 so there is no need - // to run repair. - isRepairNeeded = false; - } - } else if (previousClusterSize == 3) { - // We are increasing the cluster size > 3 which means the RF will be - // updated to 3; therefore, we want to run repair. 
+ } else if (previousClusterSize == 3 && newClusterSize == 2) { + isRepairNeeded = false; + } else if (previousClusterSize == 1 && newClusterSize == 2) { + isRepairNeeded = true; + replicationFactor = 2; + } else if (previousClusterSize == 2 && newClusterSize == 3) { + isRepairNeeded = false; + } else if (previousClusterSize == 3 && newClusterSize == 4) { isRepairNeeded = true; replicationFactor = 3; + } else if (previousClusterSize == 4 && newClusterSize > 4) { + isRepairNeeded = false; } else { - // If we cluster size of zero, then something is really screwed up. It - // should always be > 0. - throw new RuntimeException("The previous cluster size should never be zero at this point"); + throw new IllegalStateException("previousClusterSize[" + previousClusterSize + "] and newClusterSize[" + + newClusterSize + "] is not supported"); }
- if (isRepairNeeded) { -// String username = getRequiredStorageProperty(USERNAME_PROPERTY); -// String password = getRequiredStorageProperty(PASSWORD_PROPERTY); -// SchemaManager schemaManager = new SchemaManager(username, password, storageNodes); -// try{ -// schemaManager.updateTopology(); -// } catch (Exception e) { -// log.error("An error occurred while applying schema topology changes", e); + + + +// if (newClusterSize == 1) { +// isRepairNeeded = false; +// replicationFactor = 1; +// } else if (previousClusterSize >= 4) { +// // At 4 nodes we increase the RF to 3. We are not increasing the RF beyond +// // that for additional nodes; so, there is no need to run repair if we are +// // expanding from a 4 node cluster since the RF remains the same. +// isRepairNeeded = false; +// } else if (previousClusterSize == 1) { +// // The RF will increase since we are going from a single to a multi-node +// // cluster; therefore, we want to run repair. +// isRepairNeeded = true; +// replicationFactor = 2; +// } else if (previousClusterSize == 2) { +// if (storageNodes.size() > 3) { +// // If we go from 2 to > 3 nodes we will increase the RF to 3; therefore +// // we want to run repair. +// isRepairNeeded = true; +// replicationFactor = 3; +// } else { +// // If we go from 2 to 3 nodes, we keep the RF at 2 so there is no need +// // to run repair. +// isRepairNeeded = false; // } +// } else if (previousClusterSize == 3) { +// // We are increasing the cluster size > 3 which means the RF will be +// // updated to 3; therefore, we want to run repair. +// isRepairNeeded = true; +// replicationFactor = 3; +// } else { +// // If we cluster size of zero, then something is really screwed up. It +// // should always be > 0. +// throw new RuntimeException("The previous cluster size should never be zero at this point"); +// }
+ if (isRepairNeeded) { updateReplicationFactor(replicationFactor); if (previousClusterSize == 1) { updateGCGraceSeconds(691200); // 8 days } + } else if (newClusterSize == 1) { + updateReplicationFactor(1); + updateGCGraceSeconds(0); }
return isRepairNeeded;
commit a41c58828525cef776fe097e8836652c903f8337 Author: John Sanda jsanda@redhat.com Date: Mon Aug 19 21:21:22 2013 -0400
make sure the mode is updated at the end of each (un)deployment phase
The transaction demarcation has been updated to ensure that upon successful completion of a (un)deployment phase, the storage node's mode will be updated before starting the next phase. Previously it was done in the same transaction, and if starting the next phase failed, then the node's mode would still be set to the previously completed phase. This did not break anything, but it did result in extra work.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 4d4fd17..28723d7 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -238,6 +238,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
switch (storageNode.getOperationMode()) { case INSTALLED: + storageNode.setOperationMode(OperationMode.ANNOUNCE); case ANNOUNCE: reset(); storageNodeOperationsHandler.announceStorageNode(subject, storageNode); diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java index b068734..99bd592 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java @@ -82,7 +82,6 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa if (log.isInfoEnabled()) { log.info("Announcing " + storageNode + " to storage node cluster."); } - storageNode.setOperationMode(StorageNode.OperationMode.ANNOUNCE); List<StorageNode> clusterNodes = entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, StorageNode.class).setParameter("operationMode", StorageNode.OperationMode.NORMAL).getResultList(); List<StorageNode> allNodes = new ArrayList<StorageNode>(clusterNodes); @@ -113,7 +112,6 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa public void unannounceStorageNode(Subject subject, StorageNode storageNode) { log.info("Unannouncing " + storageNode);
- storageNode.setOperationMode(StorageNode.OperationMode.UNANNOUNCE); List<StorageNode> clusterNodes = entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, StorageNode.class).setParameter("operationMode", StorageNode.OperationMode.NORMAL).getResultList(); for (StorageNode clusterNode : clusterNodes) { @@ -133,8 +131,6 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa public void uninstall(Subject subject, StorageNode storageNode) { log.info("Uninstalling " + storageNode);
- storageNode.setOperationMode(StorageNode.OperationMode.UNINSTALL); - if (storageNode.getResource() == null) { finishUninstall(subject, storageNode); } else { @@ -142,6 +138,19 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa } }
+ private void finishUninstall(Subject subject, StorageNode storageNode) { + if (storageNode.getResource() != null) { + log.info("Removing storage node resource " + storageNode.getResource() + " from inventory"); + Resource resource = storageNode.getResource(); + storageNodeOperationsHandler.detachFromResource(storageNode); + resourceManager.uninventoryResource(subject, resource.getId()); + } + log.info("Removing storage node entity " + storageNode + " from database"); + entityManager.remove(storageNode); + + log.info(storageNode + " has been undeployed"); + } + @Override @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) public void detachFromResource(StorageNode storageNode) { @@ -153,7 +162,7 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa public void decommissionStorageNode(Subject subject, StorageNode storageNode) { log.info("Preparing to decommission " + storageNode);
- storageNode.setOperationMode(StorageNode.OperationMode.DECOMMISSION); + storageNode = storageNodeOperationsHandler.setMode(storageNode, StorageNode.OperationMode.DECOMMISSION); List<StorageNode> storageNodes = entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, StorageNode.class).setParameter("operationMode", StorageNode.OperationMode.NORMAL).getResultList(); storageNodes.add(storageNode); @@ -184,6 +193,10 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa StorageNode.class).setParameter("address", storageNodeAddress.getHostAddress()).getSingleResult();
if (storageNode.getOperationMode() == StorageNode.OperationMode.BOOTSTRAP) { + // TODO need to add support for HA deployments + // If multiple RHQ servers are running, they will all receive the event + // notification that the node is up and will all wind up calling this method. + storageNode = storageNodeOperationsHandler.setMode(storageNode, StorageNode.OperationMode.ADD_MAINTENANCE); performAddNodeMaintenance(subjectManager.getOverlord(), storageNode); } else { log.info(storageNode + " has already been bootstrapped. Skipping add node maintenance."); @@ -225,7 +238,11 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa StorageNode.class).setParameter("address", storageNodeAddress.getHostAddress()).getSingleResult();
if (storageNode.getOperationMode() == StorageNode.OperationMode.DECOMMISSION) { - storageNode.setOperationMode(StorageNode.OperationMode.REMOVE_MAINTENANCE); + // TODO need to add support for HA deployments + // If multiple RHQ servers are running, they will all receive the event + // notification that the node is up and will all wind up calling this method. + storageNode = storageNodeOperationsHandler.setMode(storageNode, + StorageNode.OperationMode.REMOVE_MAINTENANCE); performRemoveNodeMaintenance(subjectManager.getOverlord(), storageNode); } else { log.info("Remove node maintenance has already been run for " + storageNode); @@ -241,7 +258,7 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa node.setMaintenancePending(true); } boolean runRepair = storageNode.isMaintenancePending(); - performRemoveNodeMaintenance(subjectManager.getOverlord(), clusterNodes.get(0), runRepair, + performRemoveNodeMaintenance(subject, clusterNodes.get(0), runRepair, createPropertyListOfAddresses(SEEDS_LIST, clusterNodes)); }
@@ -357,8 +374,9 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa
if (nextNode == null) { log.info("Successfully announced new storage node to storage cluster"); - newStorageNode.setOperationMode(StorageNode.OperationMode.BOOTSTRAP); - prepareNodeForBootstrap(subject, newStorageNode, addresses.deepCopy(false)); + newStorageNode = storageNodeOperationsHandler.setMode(newStorageNode, + StorageNode.OperationMode.BOOTSTRAP); + storageNodeOperationsHandler.bootstrapStorageNode(subject, newStorageNode); } else { announceStorageNode(subject, newStorageNode, nextNode, addresses.deepCopy(false)); } @@ -369,32 +387,31 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) public void handleUnannounce(ResourceOperationHistory operationHistory) { StorageNode storageNode = findStorageNode(operationHistory.getResource()); - StorageNode removedStorageNode = null; + StorageNode removedStorageNode = findStorageNodeByMode(StorageNode.OperationMode.UNANNOUNCE); switch (operationHistory.getStatus()) { case INPROGRESS: // nothing to do here break; case CANCELED: - removedStorageNode = findStorageNodeByMode(StorageNode.OperationMode.UNANNOUNCE); undeploymentOperationCanceled(storageNode, operationHistory, removedStorageNode); break; case FAILURE: - removedStorageNode = findStorageNodeByMode(StorageNode.OperationMode.UNANNOUNCE); deploymentOperationFailed(storageNode, operationHistory, removedStorageNode); break; default: // SUCCESS storageNode.setMaintenancePending(false);
- removedStorageNode = findStorageNodeByMode(StorageNode.OperationMode.UNANNOUNCE); StorageNode nextNode = takeFromMaintenanceQueue(); Subject subject = getSubject(operationHistory); - Configuration params = operationHistory.getParameters(); - PropertyList addresses = params.getList("addresses");
if (nextNode == null) { log.info("Successfully unannounced " + removedStorageNode + " to storage cluster"); + removedStorageNode = storageNodeOperationsHandler.setMode(removedStorageNode, + StorageNode.OperationMode.UNINSTALL); uninstall(getSubject(operationHistory), removedStorageNode); } else { + Configuration params = operationHistory.getParameters(); + PropertyList addresses = params.getList("addresses"); unannounceStorageNode(subject, nextNode, addresses.deepCopy(false)); } } @@ -428,17 +445,15 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) public void handleAddNodeMaintenance(ResourceOperationHistory resourceOperationHistory) { StorageNode storageNode = findStorageNode(resourceOperationHistory.getResource()); - StorageNode newStorageNode = null; + StorageNode newStorageNode = findStorageNodeByMode(StorageNode.OperationMode.ADD_MAINTENANCE); switch (resourceOperationHistory.getStatus()) { case INPROGRESS: // nothing to do here return; case CANCELED: - newStorageNode = findStorageNodeByMode(StorageNode.OperationMode.ADD_MAINTENANCE); deploymentOperationCanceled(storageNode, resourceOperationHistory, newStorageNode); return; case FAILURE: - newStorageNode = findStorageNodeByMode(StorageNode.OperationMode.ADD_MAINTENANCE); deploymentOperationFailed(storageNode, resourceOperationHistory, newStorageNode); return; default: // SUCCESS @@ -448,9 +463,7 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa
if (nextNode == null) { log.info("Finished running add node maintenance on all cluster nodes"); - // TODO replace this with an UPDATE statement - newStorageNode = findStorageNodeByMode(StorageNode.OperationMode.ADD_MAINTENANCE); - newStorageNode.setOperationMode(StorageNode.OperationMode.NORMAL); + storageNodeOperationsHandler.setMode(newStorageNode, StorageNode.OperationMode.NORMAL); } else { Configuration parameters = resourceOperationHistory.getParameters(); boolean runRepair = parameters.getSimple(RUN_REPAIR_PROPERTY).getBooleanValue(); @@ -463,19 +476,39 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa
@Override @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) + public void bootstrapStorageNode(Subject subject, StorageNode storageNode) { + List<StorageNode> clusterNodes = entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, + StorageNode.class).setParameter("operationMode", StorageNode.OperationMode.NORMAL).getResultList(); + clusterNodes.add(storageNode); + prepareNodeForBootstrap(subject, storageNode, createPropertyListOfAddresses("addresses", clusterNodes)); + } + + private void prepareNodeForBootstrap(Subject subject, StorageNode storageNode, PropertyList addresses) { + if (log.isInfoEnabled()) { + log.info("Preparing to bootstrap " + storageNode + " into cluster..."); + } + StorageClusterSettings clusterSettings = storageClusterSettingsManager.getClusterSettings(subject); + Configuration parameters = new Configuration(); + parameters.put(new PropertySimple("cqlPort", clusterSettings.getCqlPort())); + parameters.put(new PropertySimple("gossipPort", clusterSettings.getGossipPort())); + parameters.put(addresses); + + scheduleOperation(subject, storageNode, parameters, "prepareForBootstrap"); + } + + @Override + @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) public void handleRemoveNodeMaintenance(ResourceOperationHistory operationHistory) { StorageNode storageNode = findStorageNode(operationHistory.getResource()); - StorageNode removedStorageNode = null; + StorageNode removedStorageNode = findStorageNodeByMode(StorageNode.OperationMode.REMOVE_MAINTENANCE); switch (operationHistory.getStatus()) { case INPROGRESS: // nothing to do here break; case CANCELED: - removedStorageNode = findStorageNodeByMode(StorageNode.OperationMode.REMOVE_MAINTENANCE); undeploymentOperationCanceled(storageNode, operationHistory, removedStorageNode); break; case FAILURE: - removedStorageNode = findStorageNodeByMode(StorageNode.OperationMode.REMOVE_MAINTENANCE); undeploymentOperationFailed(storageNode, operationHistory, removedStorageNode); break; 
default: // SUCCESS @@ -485,8 +518,8 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa
if (nextNode == null) { log.info("Finished running remove node maintenance on all cluster nodes"); - // TODO replace this with an UPDATE statement - removedStorageNode = findStorageNodeByMode(StorageNode.OperationMode.REMOVE_MAINTENANCE); + removedStorageNode = storageNodeOperationsHandler.setMode(removedStorageNode, + StorageNode.OperationMode.UNANNOUNCE); unannounceStorageNode(getSubject(operationHistory), removedStorageNode); } else { Configuration parameters = operationHistory.getParameters(); @@ -537,14 +570,11 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa } }
- private void finishUninstall(Subject subject, StorageNode storageNode) { - if (storageNode.getResource() != null) { - log.info("Removing storage node resource " + storageNode.getResource() + " from inventory"); - storageNodeOperationsHandler.detachFromResource(storageNode); - resourceManager.uninventoryResource(subject, storageNode.getResource().getId()); - } - log.info("Removing storage node entity " + storageNode + " from database"); - entityManager.remove(storageNode); + @Override + @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) + public StorageNode setMode(StorageNode storageNode, StorageNode.OperationMode newMode) { + storageNode.setOperationMode(newMode); + return entityManager.merge(storageNode); }
private Subject getSubject(ResourceOperationHistory resourceOperationHistory) { @@ -644,27 +674,6 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa return null; }
- @Override - public void bootstrapStorageNode(Subject subject, StorageNode storageNode) { - List<StorageNode> clusterNodes = entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, - StorageNode.class).setParameter("operationMode", StorageNode.OperationMode.NORMAL).getResultList(); - clusterNodes.add(storageNode); - prepareNodeForBootstrap(subject, storageNode, createPropertyListOfAddresses("addresses", clusterNodes)); - } - - private void prepareNodeForBootstrap(Subject subject, StorageNode storageNode, PropertyList addresses) { - if (log.isInfoEnabled()) { - log.info("Preparing to bootstrap " + storageNode + " into cluster..."); - } - StorageClusterSettings clusterSettings = storageClusterSettingsManager.getClusterSettings(subject); - Configuration parameters = new Configuration(); - parameters.put(new PropertySimple("cqlPort", clusterSettings.getCqlPort())); - parameters.put(new PropertySimple("gossipPort", clusterSettings.getGossipPort())); - parameters.put(addresses); - - scheduleOperation(subject, storageNode, parameters, "prepareForBootstrap"); - } - private StorageNode takeFromMaintenanceQueue() { List<StorageNode> storageNodes = entityManager.createQuery("SELECT s FROM StorageNode s WHERE " + "s.operationMode = :operationMode AND s.maintenancePending = :maintenancePending", StorageNode.class) diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerLocal.java index 5d08dd8..f0a5b98 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerLocal.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerLocal.java @@ -52,4 +52,6 @@ public interface StorageNodeOperationsHandlerLocal { void performRemoveNodeMaintenance(Subject subject, StorageNode 
storageNode);
void logError(StorageNode.OperationMode newStorageNodeOperationMode, String error, Exception e); + + StorageNode setMode(StorageNode storageNode, StorageNode.OperationMode newMode); }
commit b85debdf80b0796ff14beff17403b99523478914 Author: Stefan Negrea snegrea@redhat.com Date: Mon Aug 19 15:55:38 2013 -0500
[BZ 998049] Remove all core domain dependencies from Cassandra common modules.
This was causing maven build problems because core domain would need to be built before database util would run and setup the database.
diff --git a/.classpath b/.classpath index 335117b..63f9170 100644 --- a/.classpath +++ b/.classpath @@ -215,6 +215,8 @@ <classpathentry kind="src" path="modules/helpers/ldap-tool/src/main/java"/> <classpathentry kind="src" path="modules/common/cassandra-schema/src/test/java"/> <classpathentry kind="src" path="modules/plugins/rhq-storage/src/test/java"/> + <classpathentry kind="src" path="modules/helpers/metrics-simulator/src/main/java"/> + <classpathentry kind="src" path="modules/common/cassandra-util/src/test/java"/> <classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.7"/> <classpathentry exported="true" kind="var" path="M2_REPO/org/apache/httpcomponents/httpclient/4.2.3/httpclient-4.2.3.jar" sourcepath="M2_REPO/org/apache/httpcomponents/httpclient/4.2.3/httpclient-4.2.3-sources.jar"/> <classpathentry exported="true" kind="var" path="M2_REPO/commons-io/commons-io/2.1/commons-io-2.1.jar" sourcepath="M2_REPO/commons-io/commons-io/2.1/commons-io-2.1-sources.jar"/> diff --git a/modules/common/cassandra-ccm/cassandra-ccm-arquillian/src/main/java/org/rhq/cassandra/ccm/arquillian/CCMSuiteDeploymentExtension.java b/modules/common/cassandra-ccm/cassandra-ccm-arquillian/src/main/java/org/rhq/cassandra/ccm/arquillian/CCMSuiteDeploymentExtension.java index 1aeef43..7c59114 100644 --- a/modules/common/cassandra-ccm/cassandra-ccm-arquillian/src/main/java/org/rhq/cassandra/ccm/arquillian/CCMSuiteDeploymentExtension.java +++ b/modules/common/cassandra-ccm/cassandra-ccm-arquillian/src/main/java/org/rhq/cassandra/ccm/arquillian/CCMSuiteDeploymentExtension.java @@ -26,7 +26,6 @@ package org.rhq.cassandra.ccm.arquillian;
import java.io.File; -import java.util.List; import java.util.concurrent.Callable;
import org.jboss.arquillian.config.descriptor.api.ArquillianDescriptor; @@ -62,7 +61,6 @@ import org.rhq.cassandra.ClusterInitService; import org.rhq.cassandra.DeploymentOptions; import org.rhq.cassandra.DeploymentOptionsFactory; import org.rhq.cassandra.schema.SchemaManager; -import org.rhq.core.domain.cloud.StorageNode;
/** * @author John Sanda @@ -114,7 +112,10 @@ public class CCMSuiteDeploymentExtension implements LoadableExtension {
SchemaManager schemaManager; ClusterInitService clusterInitService = new ClusterInitService(); - List<StorageNode> nodes = null; + + String[] nodes = null; + int[] jmxPorts = null; + int cqlPort = -1;
if (!Boolean.valueOf(System.getProperty("itest.use-external-storage-node", "false"))) {
@@ -131,13 +132,17 @@ public class CCMSuiteDeploymentExtension implements LoadableExtension { options.setStartRpc(true);
ccm = new CassandraClusterManager(options); - nodes = ccm.createCluster(); + ccm.createCluster(); + + nodes = ccm.getNodes(); + jmxPorts = ccm.getJmxPorts(); + cqlPort = ccm.getCqlPort();
ccm.startCluster(false);
try { - clusterInitService.waitForClusterToStart(nodes, nodes.size(), 1500, 20, 5); - schemaManager = new SchemaManager("rhqadmin", "rhqadmin", nodes); + clusterInitService.waitForClusterToStart(nodes, jmxPorts, nodes.length, 20, 5, 1500); + schemaManager = new SchemaManager("rhqadmin", "rhqadmin", nodes, cqlPort);
} catch (Exception e) { if (null != ccm) { @@ -148,7 +153,10 @@ public class CCMSuiteDeploymentExtension implements LoadableExtension { } else { try { String seed = System.getProperty("rhq.cassandra.seeds", "127.0.0.1|7299|9042"); - schemaManager = new SchemaManager("rhqadmin", "rhqadmin", seed); + nodes = parseNodeAddresses(seed); + cqlPort = parseNodeCqlPort(seed); + jmxPorts = parseNodeJmxPorts(seed); + schemaManager = new SchemaManager("rhqadmin", "rhqadmin", nodes, cqlPort);
} catch (Exception e) { throw new RuntimeException("External Cassandra initialization failed", e); @@ -157,7 +165,7 @@ public class CCMSuiteDeploymentExtension implements LoadableExtension {
try { schemaManager.install(); - clusterInitService.waitForSchemaAgreement(nodes); + clusterInitService.waitForSchemaAgreement(nodes, jmxPorts); schemaManager.updateTopology(); } catch (Exception e) { if (null != ccm) { @@ -260,5 +268,58 @@ public class CCMSuiteDeploymentExtension implements LoadableExtension { throw new RuntimeException("Could not load defined deploymentClass: " + className, e); } } + + private String[] parseNodeAddresses(String s) { + String[] unparsedNodes = s.split(","); + + String[] nodes = new String[unparsedNodes.length]; + + for (int index = 0; index < unparsedNodes.length; index++) { + String[] params = unparsedNodes[index].split("\\|"); + if (params.length != 3) { + throw new IllegalArgumentException( + "Expected string of the form, hostname|jmxPort|nativeTransportPort: [" + s + "]"); + } + + nodes[index] = params[0]; + } + + return nodes; + } + + private int[] parseNodeJmxPorts(String s) { + String[] unparsedNodes = s.split(","); + + int[] jmxPorts = new int[unparsedNodes.length]; + + for (int index = 0; index < unparsedNodes.length; index++) { + String[] params = unparsedNodes[index].split("\\|"); + if (params.length != 3) { + throw new IllegalArgumentException( + "Expected string of the form, hostname|jmxPort|nativeTransportPort: [" + s + "]"); + } + + jmxPorts[index] = Integer.parseInt(params[1]); + } + + return jmxPorts; + } + + private int parseNodeCqlPort(String s) { + String[] unparsedNodes = s.split(","); + + for (String unparsedNode : unparsedNodes) { + String[] params = unparsedNode.split("\\|"); + if (params.length != 3) { + throw new IllegalArgumentException( + "Expected string of the form, hostname|jmxPort|nativeTransportPort: [" + s + "]"); + } + + return Integer.parseInt(params[2]); + } + + throw new IllegalArgumentException("Seed property is not valid [" + s + "]"); + } + } } \ No newline at end of file diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java
b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java index c8bb2ef..4a02e1d 100644 --- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java +++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java @@ -46,7 +46,6 @@ import java.util.Set; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory;
-import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.pluginapi.util.ProcessExecutionUtility; import org.rhq.core.system.OperatingSystemType; import org.rhq.core.system.ProcessExecution; @@ -68,6 +67,11 @@ public class CassandraClusterManager { private List<File> installedNodeDirs = new ArrayList<File>(); private Map<Integer, Process> nodeProcessMap = new HashMap<Integer, Process>();
+ private String[] nodes; + private int[] jmxPorts; + private int cqlPort; + + public CassandraClusterManager() { this(new DeploymentOptionsFactory().newDeploymentOptions()); } @@ -90,7 +94,28 @@ public class CassandraClusterManager { } }
- public List<StorageNode> createCluster() { + /** + * @return addresses of storage cluster nodes + */ + public String[] getNodes() { + return nodes; + } + + /** + * @return the JMX ports + */ + public int[] getJmxPorts() { + return jmxPorts; + } + + /** + * @return the CQL Port + */ + public int getCqlPort() { + return cqlPort; + } + + public void createCluster() { if (log.isDebugEnabled()) { log.debug("Installing embedded " + deploymentOptions.getNumNodes() + " node cluster to " + deploymentOptions.getClusterDir()); @@ -104,11 +129,11 @@ public class CassandraClusterManager { if (installedMarker.exists()) { log.info("It appears that the cluster already exists in " + clusterDir); log.info("Skipping cluster creation."); - return calculateNodes(); + getStorageClusterConfiguration(); + return; } FileUtil.purge(clusterDir, false);
- List<StorageNode> nodes = new ArrayList<StorageNode>(deploymentOptions.getNumNodes()); String seeds = collectionToString(calculateLocalIPAddresses(deploymentOptions.getNumNodes())); Set<InetAddress> ipAddresses = null;
@@ -118,6 +142,10 @@ public class CassandraClusterManager { throw new RuntimeException("Failed to get cluster IP addresses", e); }
+ this.nodes = new String[deploymentOptions.getNumNodes()]; + this.jmxPorts = new int[deploymentOptions.getNumNodes()]; + this.cqlPort = deploymentOptions.getNativeTransportPort(); + for (int i = 0; i < deploymentOptions.getNumNodes(); ++i) { File basedir = new File(deploymentOptions.getClusterDir(), "node" + i); String address = getLocalIPAddress(i + 1); @@ -142,15 +170,11 @@ public class CassandraClusterManager { deployer.unzipDistro(); deployer.applyConfigChanges(); deployer.updateFilePerms(); - - StorageNode storageNode = new StorageNode(); - storageNode.setAddress(address); - storageNode.setJmxPort(deploymentOptions.getJmxPort() + i); - storageNode.setCqlPort(nodeOptions.getNativeTransportPort()); - nodes.add(storageNode); - deployer.updateStorageAuthConf(ipAddresses);
+ this.nodes[i] = address; + this.jmxPorts[i] = deploymentOptions.getJmxPort() + i; + installedNodeDirs.add(basedir); } catch (Exception e) { log.error("Failed to install node at " + basedir); @@ -162,7 +186,6 @@ public class CassandraClusterManager { } catch (IOException e) { log.warn("Failed to write installed file marker to " + installedMarker, e); } - return nodes; }
private void updateStorageAuthConf(File basedir) { @@ -210,16 +233,15 @@ return ipAddresses; }
- private List<StorageNode> calculateNodes() { - List<StorageNode> nodes = new ArrayList<StorageNode>(deploymentOptions.getNumNodes()); + private void getStorageClusterConfiguration() { + this.nodes = new String[deploymentOptions.getNumNodes()]; + this.jmxPorts = new int[deploymentOptions.getNumNodes()]; for (int i = 0; i < deploymentOptions.getNumNodes(); ++i) { - StorageNode storageNode = new StorageNode(); - storageNode.setAddress(getLocalIPAddress(i + 1)); - storageNode.setJmxPort(deploymentOptions.getJmxPort() + i); - storageNode.setCqlPort(deploymentOptions.getNativeTransportPort()); - nodes.add(storageNode); + this.nodes[i] = getLocalIPAddress(i + 1); + this.jmxPorts[i] = deploymentOptions.getJmxPort() + i; } - return nodes; + + this.cqlPort = deploymentOptions.getNativeTransportPort(); }
public void startCluster() { @@ -230,9 +251,9 @@ public class CassandraClusterManager { startCluster(getNodeIds());
if (waitForClusterToStart) { - List<StorageNode> nodes = calculateNodes(); + getStorageClusterConfiguration(); ClusterInitService clusterInitService = new ClusterInitService(); - clusterInitService.waitForClusterToStart(nodes, nodes.size(), 20); + clusterInitService.waitForClusterToStart(this.nodes, this.jmxPorts, this.nodes.length, 20); } }
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/ClusterInitService.java b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/ClusterInitService.java index 83851c5..cbbfad5 100644 --- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/ClusterInitService.java +++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/ClusterInitService.java @@ -28,7 +28,6 @@ package org.rhq.cassandra; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedList; -import java.util.List; import java.util.Map; import java.util.Queue; import java.util.Set; @@ -42,8 +41,6 @@ import javax.management.remote.JMXServiceURL; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory;
-import org.rhq.core.domain.cloud.StorageNode; - /** * This class provides operations to ensure a cluster is initialized and in a consistent * state. It does not offer functionality for initializing a cluster but rather to make @@ -56,13 +53,25 @@ public final class ClusterInitService {
private final Log log = LogFactory.getLog(ClusterInitService.class);
- public boolean ping(List<StorageNode> storageNodes, int numHosts) { + private static final String JMX_CONNECTION_STRING = "service:jmx:rmi:///jndi/rmi://%s:%s/jmxrmi"; + + /** + * Pings the storage nodes to verify if they are available and native transport + * is running. + * + * @param storageNodes storage node addresses + * @param jmxPorts JMX ports + * @param numHosts minimum number of active hosts + * + * @return [true] cluster available with at least minimum number of hosts available, [false] otherwise + */ + public boolean ping(String[] storageNodes, int[] jmxPorts, int numHosts) { int connections = 0; long sleep = 100;
- for (StorageNode host : storageNodes) { + for (int index = 0; index < jmxPorts.length; index++) { try { - boolean isNativeTransportRunning = this.isNativeTransportRunning(host); + boolean isNativeTransportRunning = this.isNativeTransportRunning(storageNodes[index], jmxPorts[index]); if (isNativeTransportRunning) { ++connections; } @@ -71,7 +80,8 @@ public final class ClusterInitService { } } catch (Exception e) { if (log.isDebugEnabled()) { - log.debug("Unable to open JMX connection to cassandra node [" + host + "]", e); + log.debug("Unable to open JMX connection on port [" + jmxPorts[index] + "] to cassandra node [" + + storageNodes[index] + "]", e); } return false; } @@ -89,12 +99,12 @@ public final class ClusterInitService { * hosts. A runtime exception will be thrown after 10 failed retries. * <br/><br/> * After connecting to all nodes, this method will then sleep for a fixed delay. - * See {@link #waitForClusterToStart(java.util.List, int, int)} for details. - * - * @param hosts The cluster nodes to which a connection should be made + * See {@link #waitForClusterToStart(int, java.util.List, int)} for details. + * @param storageNodes The cluster nodes to which a connection should be made + * @param jmxPorts JMX port for each cluster node address */ - public void waitForClusterToStart(List<StorageNode> storageNodes) { - waitForClusterToStart(storageNodes, storageNodes.size(), 10); + public void waitForClusterToStart(String[] storageNodes, int jmxPorts[]) { + waitForClusterToStart(storageNodes, jmxPorts, storageNodes.length, 10); }
/** @@ -109,15 +119,14 @@ public final class ClusterInitService { * schema and to create the cassandra super user. Cassandra has a hard-coded delay of * 10 sceonds before it creates the super user, which means the rhq schema cannot be * created before that. - * - * @param hosts The cluster nodes to which a connection should be made * @param numHosts The number of hosts to which a successful connection has to be made * before returning. * @param retries The number of times to retry connecting. A runtime exception will be * thrown when the number of failed connections exceeds this value. + * @param hosts The cluster nodes to which a connection should be made */ - public void waitForClusterToStart(List<StorageNode> storageNodes, int numHosts, int retries) { - waitForClusterToStart(storageNodes, numHosts, 250, retries, 1); + public void waitForClusterToStart(String[] storageNodes, int jmxPorts[], int numHosts, int retries) { + waitForClusterToStart(storageNodes, jmxPorts, numHosts, 250, retries, 1); }
/** @@ -132,17 +141,16 @@ public final class ClusterInitService { * schema and to create the cassandra super user. Cassandra has a hard-coded delay of * 10 sceonds before it creates the super user, which means the rhq schema cannot be * created before that. - * - * @param hosts The cluster nodes to which a connection should be made * @param numHosts The number of hosts to which a successful connection has to be made * before returning. * @param delay The amount of time wait between attempts to make a connection * @param retries The number of times to retry connecting. A runtime exception will be * thrown when the number of failed connections exceeds this value. * @param initialWait The amount of seconds before first try. + * @param hosts The cluster nodes to which a connection should be made */ - public void waitForClusterToStart(List<StorageNode> storageNodes, int numHosts, long delay, int retries, - int initialWait) { + public void waitForClusterToStart(String[] storageNodes, int jmxPorts[], int numHosts, long delay, + int retries, int initialWait) { if (initialWait > 0) { try { if (log.isDebugEnabled()) { @@ -155,23 +163,28 @@ public final class ClusterInitService {
int connections = 0; int failedConnections = 0; - Queue<StorageNode> queue = new LinkedList<StorageNode>(storageNodes); - StorageNode storageNode = queue.poll(); + Queue<Integer> queue = new LinkedList<Integer>(); + for (int index = 0; index < storageNodes.length; index++) { + queue.add(index); + } + + Integer storageNodeIndex = queue.poll();
- while (storageNode != null) { + while (storageNodeIndex != null) { if (failedConnections >= retries) { throw new RuntimeException("Unable to verify that cluster nodes have started after " + failedConnections + " failed attempts"); } try { - boolean isNativeTransportRunning = this.isNativeTransportRunning(storageNode); + boolean isNativeTransportRunning = isNativeTransportRunning(storageNodes[storageNodeIndex], + jmxPorts[storageNodeIndex]); if (log.isDebugEnabled() && isNativeTransportRunning) { - log.debug("Successfully connected to cassandra node [" + storageNode + "]"); + log.debug("Successfully connected to cassandra node [" + storageNodes[storageNodeIndex] + "]"); } if (isNativeTransportRunning) { ++connections; } else { - queue.offer(storageNode); + queue.offer(storageNodeIndex); } if (connections == numHosts) { if (log.isDebugEnabled()) { @@ -186,9 +199,10 @@ public final class ClusterInitService { } } catch (Exception e) { ++failedConnections; - queue.offer(storageNode); + queue.offer(storageNodeIndex); if (log.isDebugEnabled()) { - log.debug("Unable to open JMX connection to cassandra node [" + storageNode + "].", e); + log.debug("Unable to open JMX connection on port [" + jmxPorts[storageNodeIndex] + + "] to cassandra node [" + storageNodes[storageNodeIndex] + "].", e); } else if (log.isInfoEnabled()) { log.debug("Unable to open connection to cassandra node."); } @@ -197,7 +211,7 @@ public final class ClusterInitService { Thread.sleep(delay); } catch (InterruptedException e) { } - storageNode = queue.poll(); + storageNodeIndex = queue.poll(); } }
@@ -209,8 +223,8 @@ public final class ClusterInitService { * * @param hosts The cluster nodes */ - public void waitForSchemaAgreement(List<StorageNode> storageNodes) throws Exception { - if (storageNodes == null) { + public void waitForSchemaAgreement(String[] storageNodes, int[] jmxPorts) throws Exception { + if (storageNodes == null || storageNodes.length == 0) { return; }
@@ -219,8 +233,8 @@ public final class ClusterInitService {
while (!schemaInAgreement) { Set<String> schemaVersions = new HashSet<String>(); - for (StorageNode host : storageNodes) { - String otherSchchemaVersion = getSchemaVersionForNode(host); + for (int index = 0; index < storageNodes.length; index++) { + String otherSchchemaVersion = getSchemaVersionForNode(storageNodes[index], jmxPorts[index]); if (otherSchchemaVersion != null) { schemaVersions.add(otherSchchemaVersion); } @@ -256,9 +270,9 @@ public final class ClusterInitService { } }
- public boolean isNativeTransportRunning(StorageNode storageNode) throws Exception { + public boolean isNativeTransportRunning(String storageNode, int jmxPort) throws Exception { Boolean nativeTransportRunning = false; - String url = storageNode.getJMXConnectionURL(); + String url = getJMXConnectionURL(storageNode, jmxPort); JMXServiceURL serviceURL = new JMXServiceURL(url); Map<String, String> env = new HashMap<String, String>(); JMXConnector connector = null; @@ -292,8 +306,8 @@ public final class ClusterInitService { return nativeTransportRunning; }
- private String getSchemaVersionForNode(StorageNode storageNode) throws Exception { - String url = storageNode.getJMXConnectionURL(); + private String getSchemaVersionForNode(String storageNode, int jmxPort) throws Exception { + String url = this.getJMXConnectionURL(storageNode, jmxPort); JMXServiceURL serviceURL = new JMXServiceURL(url); Map<String, String> env = new HashMap<String, String>(); JMXConnector connector = null; @@ -326,4 +340,17 @@ public final class ClusterInitService { } return null; } + + /** + * Constructs the JMX connection URL based on the node address and + * JMX port + * + * @param address + * @param jmxPort + * @return + */ + private String getJMXConnectionURL(String address, int jmxPort) { + String[] split = JMX_CONNECTION_STRING.split("%s"); + return split[0] + address + split[1] + jmxPort + split[2]; + } } \ No newline at end of file diff --git a/modules/common/cassandra-ccm/cassandra-ccm-maven-plugin/src/main/java/org/rhq/cassandra/ccm/maven/DeployMojo.java b/modules/common/cassandra-ccm/cassandra-ccm-maven-plugin/src/main/java/org/rhq/cassandra/ccm/maven/DeployMojo.java index f50535c..310d7a2 100644 --- a/modules/common/cassandra-ccm/cassandra-ccm-maven-plugin/src/main/java/org/rhq/cassandra/ccm/maven/DeployMojo.java +++ b/modules/common/cassandra-ccm/cassandra-ccm-maven-plugin/src/main/java/org/rhq/cassandra/ccm/maven/DeployMojo.java @@ -26,7 +26,6 @@ package org.rhq.cassandra.ccm.maven;
import java.io.File; -import java.util.List;
import org.apache.maven.plugin.AbstractMojo; import org.apache.maven.plugin.MojoExecutionException; @@ -38,7 +37,6 @@ import org.rhq.cassandra.CassandraClusterManager; import org.rhq.cassandra.DeploymentOptions; import org.rhq.cassandra.DeploymentOptionsFactory; import org.rhq.cassandra.schema.SchemaManager; -import org.rhq.core.domain.cloud.StorageNode;
/** * @author John Sanda @@ -63,14 +61,14 @@ public class DeployMojo extends AbstractMojo {
long start = System.currentTimeMillis(); getLog().info("Creating " + numNodes + " cluster in " + clusterDir); - List<StorageNode> nodes = ccm.createCluster(); + ccm.createCluster();
getLog().info("Starting cluster nodes"); ccm.startCluster();
getLog().info("Installing RHQ schema"); SchemaManager schemaManager = new SchemaManager(deploymentOptions.getUsername(), - deploymentOptions.getPassword(), nodes); + deploymentOptions.getPassword(), ccm.getNodes(), ccm.getCqlPort());
try { schemaManager.install(); diff --git a/modules/common/cassandra-ccm/cassandra-ccm-testng/src/main/java/org/rhq/cassandra/CCMTestNGListener.java b/modules/common/cassandra-ccm/cassandra-ccm-testng/src/main/java/org/rhq/cassandra/CCMTestNGListener.java index a9292f7..48d047d 100644 --- a/modules/common/cassandra-ccm/cassandra-ccm-testng/src/main/java/org/rhq/cassandra/CCMTestNGListener.java +++ b/modules/common/cassandra-ccm/cassandra-ccm-testng/src/main/java/org/rhq/cassandra/CCMTestNGListener.java @@ -27,7 +27,6 @@ package org.rhq.cassandra;
import java.io.File; import java.lang.reflect.Method; -import java.util.List;
import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -36,7 +35,6 @@ import org.testng.IInvokedMethodListener; import org.testng.ITestResult;
import org.rhq.cassandra.schema.SchemaManager; -import org.rhq.core.domain.cloud.StorageNode;
/** * @author John Sanda @@ -105,13 +103,15 @@ public class CCMTestNGListener implements IInvokedMethodListener { // we cannot initialize ccm here. ccm = new CassandraClusterManager(deploymentOptions); ClusterInitService clusterInitService = new ClusterInitService(); + ccm.createCluster();
- List<StorageNode> nodes = ccm.createCluster(); + String[] nodes = ccm.getNodes(); + int[] jmxPorts = ccm.getJmxPorts();
if (System.getProperty("rhq.cassandra.cluster.skip-shutdown") == null) { - for (StorageNode node : nodes) { + for (int index = 0; index < nodes.length; index++) { try { - if (clusterInitService.isNativeTransportRunning(node)) { + if (clusterInitService.isNativeTransportRunning(nodes[index], jmxPorts[index])) { throw new RuntimeException("A cluster is already running on the same ports."); } } catch (Exception e) { @@ -122,12 +122,13 @@ public class CCMTestNGListener implements IInvokedMethodListener { ccm.startCluster(false);
- clusterInitService.waitForClusterToStart(nodes, nodes.size(), 1500, 20, 2); + clusterInitService.waitForClusterToStart(nodes, jmxPorts, nodes.length, 20, 2, 1500);
- SchemaManager schemaManager = new SchemaManager(annotation.username(), annotation.password(), nodes); + SchemaManager schemaManager = new SchemaManager(annotation.username(), annotation.password(), nodes, + ccm.getCqlPort()); schemaManager.install(); if (annotation.waitForSchemaAgreement()) { - clusterInitService.waitForSchemaAgreement(nodes); + clusterInitService.waitForSchemaAgreement(nodes, jmxPorts); } schemaManager.updateTopology(); } diff --git a/modules/common/cassandra-schema/pom.xml b/modules/common/cassandra-schema/pom.xml index 6fb2915..077cdb9 100644 --- a/modules/common/cassandra-schema/pom.xml +++ b/modules/common/cassandra-schema/pom.xml @@ -13,16 +13,10 @@ <name>RHQ Cassandra Schema</name>
<dependencies> - <dependency> - <groupId>${project.groupId}</groupId> - <artifactId>rhq-cassandra-ccm-core</artifactId> - <version>${project.version}</version> - </dependency> - - <dependency> - <groupId>${project.groupId}</groupId> - <artifactId>rhq-cassandra-util</artifactId> - <version>${project.version}</version> + <dependency> + <groupId>${project.groupId}</groupId> + <artifactId>rhq-cassandra-util</artifactId> + <version>${project.version}</version> </dependency>
<dependency> @@ -36,13 +30,6 @@ </dependency>
<dependency> - <groupId>${project.groupId}</groupId> - <artifactId>rhq-core-domain</artifactId> - <version>${project.version}</version> - </dependency> - - - <dependency> <groupId>com.datastax.cassandra</groupId> <artifactId>cassandra-driver-core</artifactId> <version>${cassandra.driver.version}</version> diff --git a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/AbstractManager.java b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/AbstractManager.java index 7b8c520..7dcef1b 100644 --- a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/AbstractManager.java +++ b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/AbstractManager.java @@ -40,8 +40,6 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory;
import org.rhq.cassandra.util.ClusterBuilder; -import org.rhq.core.domain.cloud.StorageNode; -import org.rhq.core.util.StringUtil;
/** * @author Stefan Negrea @@ -71,13 +69,15 @@ abstract class AbstractManager { private Session session; private final String username; private final String password; - private List<StorageNode> nodes = new ArrayList<StorageNode>(); + private final int cqlPort; + private final String[] nodes; private final UpdateFile managementTasks;
- protected AbstractManager(String username, String password, List<StorageNode> nodes) { + protected AbstractManager(String username, String password, String[] nodes, int cqlPort) { try { this.username = username; this.password = password; + this.cqlPort = cqlPort; this.nodes = nodes; } catch (NoHostAvailableException e) { throw new RuntimeException("Unable create storage node session.", e); @@ -108,15 +108,11 @@ abstract class AbstractManager { protected void initClusterSession(String username, String password) { shutdownClusterConnection();
- String[] hostNames = new String[nodes.size()]; - for (int i = 0; i < hostNames.length; ++i) { - hostNames[i] = nodes.get(i).getAddress(); - }
- log.info("Initializing session to connect to " + StringUtil.arrayToString(hostNames)); + log.info("Initializing storage node session.");
- Cluster cluster = new ClusterBuilder().addContactPoints(hostNames).withCredentials(username, password) - .withPort(nodes.get(0).getCqlPort()).withCompression(Compression.NONE).build(); + Cluster cluster = new ClusterBuilder().addContactPoints(nodes).withCredentials(username, password) + .withPort(this.getCqlPort()).withCompression(Compression.NONE).build();
log.info("Cluster connection configured.");
@@ -140,7 +136,7 @@ abstract class AbstractManager { * @return cluster size */ protected int getClusterSize() { - return nodes.size(); + return nodes.length; }
/** @@ -158,6 +154,13 @@ abstract class AbstractManager { }
/** + * @return the cqlPort + */ + protected int getCqlPort() { + return cqlPort; + } + + /** * Runs a CQL query to check the existence of the RHQ user on the storage cluster. * * @return true if the RHQ user exists, false otherwise diff --git a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/SchemaManager.java b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/SchemaManager.java index 1a82779..fdad697 100644 --- a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/SchemaManager.java +++ b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/SchemaManager.java @@ -25,7 +25,6 @@
package org.rhq.cassandra.schema;
-import java.util.ArrayList; import java.util.Arrays; import java.util.List;
@@ -34,8 +33,6 @@ import org.apache.log4j.Level; import org.apache.log4j.Logger; import org.apache.log4j.PatternLayout;
-import org.rhq.core.domain.cloud.StorageNode; - /** * @author John Sanda */ @@ -51,19 +48,29 @@ public class SchemaManager { */ private final String password;
- private final List<StorageNode> nodes = new ArrayList<StorageNode>(); + /** + * Node addresses + */ + private final String[] nodes;
/** * - * @param username The username RHQ will use to connect to the storage cluster. - * @param password The password RHQ will use to connect to the storage cluster. - * @param nodes A list of seeds nodes that are assumed to be already running and - * clustered prior to apply schema changes. The format for each node - * should be address|jmx_port|cql_port,address|jmx_port|cql_port. - * Each node consists of three fields that are pipe-delimited. */ - public SchemaManager(String username, String password, String... nodes) { - this(username, password, parseNodeInformation(nodes)); + private final int cqlPort; + + /** + * + * @param username The username RHQ will use to connect to the storage cluster + * @param password The password RHQ will use to connect to the storage cluster + * @param nodes A list of seeds nodes that are assumed to be already running and + * clustered prior to apply schema changes. + * @param cqlPort The native CQL port for the storage cluster + */ + public SchemaManager(String username, String password, String[] nodes, int cqlPort) { + this.username = username; + this.password = password; + this.cqlPort = cqlPort; + this.nodes = nodes; }
/** @@ -72,11 +79,13 @@ public class SchemaManager { * @param password The password RHQ will use to connect to the storage cluster. * @param nodes A list of seeds nodes that are assumed to be already running and * clustered prior to apply schema changes. + * @param cqlPort The native CQL port for the storage cluster */ - public SchemaManager(String username, String password, List<StorageNode> nodes) { + public SchemaManager(String username, String password, List<String> nodes, int cqlPort) { this.username = username; this.password = password; - this.nodes.addAll(nodes); + this.cqlPort = cqlPort; + this.nodes = nodes.toArray(new String[nodes.size()]); }
/** @@ -85,7 +94,7 @@ public class SchemaManager { * @throws Exception */ public void install() throws Exception { - VersionManager version = new VersionManager(username, password, nodes); + VersionManager version = new VersionManager(username, password, nodes, cqlPort); version.install(); }
@@ -96,7 +105,7 @@ public class SchemaManager { * @throws Exception */ public void checkCompatibility() throws Exception { - VersionManager version = new VersionManager(username, password, nodes); + VersionManager version = new VersionManager(username, password, nodes, cqlPort); version.checkCompatibility(); }
@@ -106,7 +115,7 @@ public class SchemaManager { * @throws Exception */ public void drop() throws Exception { - VersionManager version = new VersionManager(username, password, nodes); + VersionManager version = new VersionManager(username, password, nodes, cqlPort); version.drop(); }
@@ -118,7 +127,7 @@ public class SchemaManager { * @throws Exception */ public void updateTopology() throws Exception { - TopologyManager topology = new TopologyManager(username, password, nodes); + TopologyManager topology = new TopologyManager(username, password, nodes, cqlPort); topology.updateTopology(); }
@@ -127,28 +136,11 @@ public class SchemaManager { * * @return list of storage nodes */ - public List<StorageNode> getStorageNodes() { + protected String[] getStorageNodes() { return nodes; }
/** - * Parse raw string that contains the list of storage nodes. - * - * @param nodes list of storage nodes - * @return - */ - private static List<StorageNode> parseNodeInformation(String... nodes) { - List<StorageNode> parsedNodes = new ArrayList<StorageNode>(); - for (String node : nodes) { - StorageNode storageNode = new StorageNode(); - storageNode.parseNodeInformation(node); - parsedNodes.add(storageNode); - } - - return parsedNodes; - } - - /** * A main runner used for direct usage of the schema manager. * * @param args arguments @@ -164,19 +156,19 @@ public class SchemaManager { migratorLogging.setLevel(Level.ALL);
if (args.length < 4) { - System.out.println("Usage : command username password nodes..."); + System.out.println("Usage : command username password cqlPort nodes..."); System.out.println("\n"); System.out.println("Commands : install | drop | topology"); - System.out.println("Node format: hostname|jmxPort|cqlPort"); return; }
String command = args[0]; String username = args[1]; String password = args[2]; - String[] hosts = Arrays.copyOfRange(args, 3, args.length); + int cqlPort = Integer.parseInt(args[3]); + String[] hosts = Arrays.copyOfRange(args, 4, args.length);
- SchemaManager schemaManager = new SchemaManager(username, password, hosts); + SchemaManager schemaManager = new SchemaManager(username, password, hosts, cqlPort);
if ("install".equalsIgnoreCase(command)) { schemaManager.install(); diff --git a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/TopologyManager.java b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/TopologyManager.java index 6c08faa..481c006 100644 --- a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/TopologyManager.java +++ b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/TopologyManager.java @@ -1,37 +1,33 @@ /* * - * * RHQ Management Platform - * * Copyright (C) 2005-2012 Red Hat, Inc. - * * All rights reserved. - * * - * * This program is free software; you can redistribute it and/or modify - * * it under the terms of the GNU General Public License, version 2, as - * * published by the Free Software Foundation, and/or the GNU Lesser - * * General Public License, version 2.1, also as published by the Free - * * Software Foundation. - * * - * * This program is distributed in the hope that it will be useful, - * * but WITHOUT ANY WARRANTY; without even the implied warranty of - * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * * GNU General Public License and the GNU Lesser General Public License - * * for more details. - * * - * * You should have received a copy of the GNU General Public License - * * and the GNU Lesser General Public License along with this program; - * * if not, write to the Free Software Foundation, Inc., - * * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * RHQ Management Platform + * Copyright (C) 2005-2012 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation, and/or the GNU Lesser + * General Public License, version 2.1, also as published by the Free + * Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License and the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * and the GNU Lesser General Public License along with this program; + * if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * */
package org.rhq.cassandra.schema;
-import java.util.List; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory;
-import org.rhq.core.domain.cloud.StorageNode; - /** * @author Stefan Negrea */ @@ -56,8 +52,8 @@ class TopologyManager extends AbstractManager { } }
- public TopologyManager(String username, String password, List<StorageNode> nodes) { - super(username, password, nodes); + public TopologyManager(String username, String password, String[] nodes, int cqlPort) { + super(username, password, nodes, cqlPort); }
/** diff --git a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/VersionManager.java b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/VersionManager.java index fe6ddf9..05cee25 100644 --- a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/VersionManager.java +++ b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/VersionManager.java @@ -25,7 +25,6 @@
package org.rhq.cassandra.schema;
-import java.util.List; import java.util.Properties; import java.util.UUID;
@@ -37,7 +36,6 @@ import org.apache.commons.logging.LogFactory; import org.rhq.cassandra.schema.exception.InstalledSchemaTooAdvancedException; import org.rhq.cassandra.schema.exception.InstalledSchemaTooOldException; import org.rhq.cassandra.schema.exception.SchemaNotInstalledException; -import org.rhq.core.domain.cloud.StorageNode;
/** * @author Stefan Negrea @@ -64,8 +62,8 @@ class VersionManager extends AbstractManager { } }
- public VersionManager(String username, String password, List<StorageNode> nodes) throws Exception { - super(username, password, nodes); + public VersionManager(String username, String password, String[] nodes, int cqlPort) throws Exception { + super(username, password, nodes, cqlPort); }
/** diff --git a/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java b/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java index 9fc389c..42e531f 100644 --- a/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java +++ b/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java @@ -1,8 +1,11 @@ package org.rhq.cassandra.util;
+import java.io.BufferedInputStream; +import java.io.BufferedOutputStream; import java.io.File; import java.io.FileInputStream; import java.io.FileNotFoundException; +import java.io.FileOutputStream; import java.io.FileWriter; import java.io.IOException; import java.util.List; @@ -11,9 +14,6 @@ import java.util.Map; import org.yaml.snakeyaml.DumperOptions; import org.yaml.snakeyaml.Yaml;
-import org.rhq.core.util.StringUtil; -import org.rhq.core.util.file.FileUtil; - /** * @author John Sanda */ @@ -57,7 +57,7 @@ public class ConfigEditor {
public void restore() { try { - FileUtil.copyFile(backupFile, configFile); + this.copyFile(backupFile, configFile); backupFile.delete(); yaml = null; config = null; @@ -70,7 +70,7 @@ public class ConfigEditor { private void createBackup() { backupFile = new File(configFile.getParent(), "." + configFile.getName() + ".bak"); try { - FileUtil.copyFile(configFile, backupFile); + this.copyFile(configFile, backupFile); } catch (IOException e) { throw new ConfigEditorException("Failed to create " + backupFile, e); } @@ -113,7 +113,16 @@ public class ConfigEditor { Map seedProvider = (Map) seedProviderList.get(0); List paramsList = (List) seedProvider.get("parameters"); Map params = (Map) paramsList.get(0); - params.put("seeds", StringUtil.arrayToString(seeds)); + + StringBuilder seedsString = new StringBuilder(); + for (int i = 0; i < seeds.length; i++) { + if (i > 0) { + seedsString.append(","); + } + + seedsString.append(seeds[i]); + } + params.put("seeds", seedsString.toString()); }
public Integer getNativeTransportPort() { @@ -132,4 +141,24 @@ public class ConfigEditor { config.put("storage_port", port); }
+ public static void copyFile(File inFile, File outFile) throws FileNotFoundException, IOException { + BufferedInputStream is = new BufferedInputStream(new FileInputStream(inFile)); + BufferedOutputStream os = new BufferedOutputStream(new FileOutputStream(outFile)); + + int bufferSize = 32768; + try { + is = new BufferedInputStream(is, bufferSize); + byte[] buffer = new byte[bufferSize]; + for (int bytesRead = is.read(buffer); bytesRead != -1; bytesRead = is.read(buffer)) { + os.write(buffer, 0, bytesRead); + } + os.flush(); + } catch (IOException ioe) { + throw new RuntimeException("Stream data cannot be copied", ioe); + } finally { + os.close(); + is.close(); + } + } + } diff --git a/modules/common/cassandra-util/src/test/java/org/rhq/cassandra/util/ConfigEditorTest.java b/modules/common/cassandra-util/src/test/java/org/rhq/cassandra/util/ConfigEditorTest.java index 9c3cc16..d101fc2 100644 --- a/modules/common/cassandra-util/src/test/java/org/rhq/cassandra/util/ConfigEditorTest.java +++ b/modules/common/cassandra-util/src/test/java/org/rhq/cassandra/util/ConfigEditorTest.java @@ -3,10 +3,14 @@ package org.rhq.cassandra.util; import static java.util.Arrays.asList; import static org.testng.Assert.assertEquals;
+import java.io.BufferedInputStream; import java.io.File; import java.io.FileInputStream; +import java.io.FileNotFoundException; import java.io.FileOutputStream; +import java.io.IOException; import java.io.InputStream; +import java.io.OutputStream; import java.lang.reflect.Method;
import org.apache.cassandra.config.Config; @@ -17,9 +21,6 @@ import org.yaml.snakeyaml.Loader; import org.yaml.snakeyaml.TypeDescription; import org.yaml.snakeyaml.Yaml;
-import org.rhq.core.util.file.FileUtil; -import org.rhq.core.util.stream.StreamUtil; - /** * @author John Sanda */ @@ -33,14 +34,14 @@ public class ConfigEditorTest { public void initTestDir(Method test) throws Exception { File dir = new File(getClass().getResource(".").toURI()); basedir = new File(dir, getClass().getSimpleName() + "/" + test.getName()); - FileUtil.purge(basedir, true); + purge(basedir, true); basedir.mkdirs();
configFile = new File(basedir, "cassandra.yaml");
InputStream inputStream = getClass().getResourceAsStream("/cassandra.yaml"); FileOutputStream outputStream = new FileOutputStream(configFile); - StreamUtil.copy(inputStream, outputStream); + copyStreams(inputStream, outputStream); }
@Test @@ -119,4 +120,40 @@ public class ConfigEditorTest { return (Config) yaml.load(inputStream); }
+ private static void purge(File dir, boolean deleteIt) { + if (dir != null) { + if (dir.isDirectory()) { + File[] doomedFiles = dir.listFiles(); + if (doomedFiles != null) { + for (File doomedFile : doomedFiles) { + purge(doomedFile, true); // call this method recursively + } + } + } + + if (deleteIt) { + dir.delete(); + } + } + + return; + } + + public static void copyStreams(InputStream is, OutputStream os) throws FileNotFoundException, IOException { + int bufferSize = 32768; + try { + is = new BufferedInputStream(is, bufferSize); + byte[] buffer = new byte[bufferSize]; + for (int bytesRead = is.read(buffer); bytesRead != -1; bytesRead = is.read(buffer)) { + os.write(buffer, 0, bytesRead); + } + os.flush(); + } catch (IOException ioe) { + throw new RuntimeException("Stream data cannot be copied", ioe); + } finally { + os.close(); + is.close(); + } + } + } diff --git a/modules/common/drift/pom.xml b/modules/common/drift/pom.xml index 9500e8d..b1347ab 100644 --- a/modules/common/drift/pom.xml +++ b/modules/common/drift/pom.xml @@ -16,15 +16,23 @@
<dependencies> <dependency> - <groupId>${project.groupId}</groupId> - <artifactId>test-utils</artifactId> - <version>${project.version}</version> - <scope>test</scope> + <groupId>${project.groupId}</groupId> + <artifactId>test-utils</artifactId> + <version>${project.version}</version> + <scope>test</scope> </dependency> + <dependency> - <groupId>commons-io</groupId> - <artifactId>commons-io</artifactId> - </dependency> + <groupId>${project.groupId}</groupId> + <artifactId>rhq-core-domain</artifactId> + <version>${project.version}</version> + <scope>provided</scope> + </dependency> + + <dependency> + <groupId>commons-io</groupId> + <artifactId>commons-io</artifactId> + </dependency> </dependencies>
<profiles> diff --git a/modules/common/filetemplate-bundle/pom.xml b/modules/common/filetemplate-bundle/pom.xml index b6c587e..8e60b26 100644 --- a/modules/common/filetemplate-bundle/pom.xml +++ b/modules/common/filetemplate-bundle/pom.xml @@ -15,6 +15,12 @@ <description>A library with the code common to the agent and server plugins for File Template Bundles</description>
<dependencies> + <dependency> + <groupId>${project.groupId}</groupId> + <artifactId>rhq-core-domain</artifactId> + <version>${project.version}</version> + <scope>provided</scope> + </dependency>
<dependency> <groupId>gnu-getopt</groupId> diff --git a/modules/common/jboss-as/pom.xml b/modules/common/jboss-as/pom.xml index a5d3255..16dd176 100644 --- a/modules/common/jboss-as/pom.xml +++ b/modules/common/jboss-as/pom.xml @@ -16,6 +16,13 @@
<dependencies> <dependency> + <groupId>${project.groupId}</groupId> + <artifactId>rhq-core-domain</artifactId> + <version>${project.version}</version> + <scope>provided</scope> + </dependency> + + <dependency> <groupId>ant</groupId> <artifactId>ant</artifactId> <version>1.6.5</version> diff --git a/modules/common/pom.xml b/modules/common/pom.xml index 45ba5c6..f957b0e 100644 --- a/modules/common/pom.xml +++ b/modules/common/pom.xml @@ -15,12 +15,6 @@ <description>parent POM for all RHQ common plugin libraries</description>
<dependencies> - <dependency> - <groupId>${project.groupId}</groupId> - <artifactId>rhq-core-domain</artifactId> - <version>${project.version}</version> - <scope>provided</scope> <!-- by PC --> - </dependency>
</dependencies>
diff --git a/modules/core/dbutils/pom.xml b/modules/core/dbutils/pom.xml index 1c66dd6..70783f4 100644 --- a/modules/core/dbutils/pom.xml +++ b/modules/core/dbutils/pom.xml @@ -204,11 +204,6 @@ <version>${project.version}</version> </dependency> <dependency> - <groupId>${project.groupId}</groupId> - <artifactId>rhq-core-domain</artifactId> - <version>${project.version}</version> - </dependency> - <dependency> <groupId>org.codehaus.groovy</groupId> <artifactId>groovy-all</artifactId> <version>2.1.3</version> diff --git a/modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/InstallerServiceImpl.java b/modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/InstallerServiceImpl.java index 5c8002a..56e2df0 100644 --- a/modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/InstallerServiceImpl.java +++ b/modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/InstallerServiceImpl.java @@ -23,6 +23,7 @@ import java.io.IOException; import java.net.InetAddress; import java.util.ArrayList; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.Properties; import java.util.regex.Matcher; @@ -39,6 +40,7 @@ import org.rhq.common.jbossas.client.controller.DatasourceJBossASClient; import org.rhq.common.jbossas.client.controller.DeploymentJBossASClient; import org.rhq.common.jbossas.client.controller.WebJBossASClient; import org.rhq.core.db.DatabaseTypeFactory; +import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.util.PropertiesFileUpdate; import org.rhq.core.util.exception.ThrowableUtil; import org.rhq.enterprise.server.installer.ServerInstallUtil.ExistingSchemaOption; @@ -501,7 +503,7 @@ public class InstallerServiceImpl implements InstallerService { ServerInstallUtil.storeServerDetails(serverProperties, clearTextDbPassword, serverDetails);
ServerInstallUtil.persistStorageNodesIfNecessary(serverProperties, clearTextDbPassword, - storageNodeSchemaManager.getStorageNodes()); + parseNodeInformation(serverProperties)); }
@Override @@ -1154,12 +1156,31 @@ public class InstallerServiceImpl implements InstallerService { } }
+ private List<StorageNode> parseNodeInformation(HashMap<String, String> serverProps) { + String[] nodes = serverProps.get("rhq.cassandra.seeds").split(","); + + List<StorageNode> parsedNodes = new ArrayList<StorageNode>(); + for (String node : nodes) { + StorageNode storageNode = new StorageNode(); + storageNode.parseNodeInformation(node); + parsedNodes.add(storageNode); + } + + return parsedNodes; + } + private SchemaManager createStorageNodeSchemaManager(HashMap<String, String> serverProps) { - String[] hosts = serverProps.get("rhq.cassandra.seeds").split(","); String username = serverProps.get("rhq.cassandra.username"); String password = serverProps.get("rhq.cassandra.password");
- return new SchemaManager(username, password, hosts); + List<StorageNode> storageNodes = this.parseNodeInformation(serverProps); + String[] nodes = new String[storageNodes.size()]; + for (int index = 0; index < storageNodes.size(); index++) { + nodes[index] = storageNodes.get(index).getAddress(); + } + int cqlPort = storageNodes.get(0).getCqlPort(); + + return new SchemaManager(username, password, nodes, cqlPort); }
private void writeInstalledFileMarker() throws Exception { diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java index dbd599a..799abcc 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClientManagerBean.java @@ -121,7 +121,13 @@ public class StorageClientManagerBean { * @param storageNodes storage nodes */ private void checkSchemaCompability(String username, String password, List<StorageNode> storageNodes) { - SchemaManager schemaManager = new SchemaManager(username, password, storageNodes); + String[] nodes = new String[storageNodes.size()]; + for (int index = 0; index < storageNodes.size(); index++) { + nodes[index] = storageNodes.get(index).getAddress(); + } + int cqlPort = storageNodes.get(0).getCqlPort(); + + SchemaManager schemaManager = new SchemaManager(username, password, nodes, cqlPort); try { schemaManager.checkCompatibility(); } catch (Exception e) { diff --git a/modules/helpers/metrics-simulator/src/main/java/org/rhq/metrics/simulator/Simulator.java b/modules/helpers/metrics-simulator/src/main/java/org/rhq/metrics/simulator/Simulator.java index 40e00bf..a5a3994 100644 --- a/modules/helpers/metrics-simulator/src/main/java/org/rhq/metrics/simulator/Simulator.java +++ b/modules/helpers/metrics-simulator/src/main/java/org/rhq/metrics/simulator/Simulator.java @@ -28,7 +28,6 @@ package org.rhq.metrics.simulator; import java.io.File; import java.io.IOException; import java.util.HashSet; -import java.util.List; import java.util.PriorityQueue; import java.util.Set; import java.util.concurrent.Executors; @@ -52,7 +51,6 @@ import org.rhq.cassandra.DeploymentOptions; import org.rhq.cassandra.DeploymentOptionsFactory; import 
org.rhq.cassandra.schema.SchemaManager; import org.rhq.cassandra.util.ClusterBuilder; -import org.rhq.core.domain.cloud.StorageNode; import org.rhq.metrics.simulator.plan.ClusterConfig; import org.rhq.metrics.simulator.plan.ScheduleGroup; import org.rhq.metrics.simulator.plan.SimulationPlan; @@ -84,17 +82,16 @@ public class Simulator implements ShutdownManager { } });
- List<StorageNode> nodes = initCluster(plan); - - createSchema(nodes); + initCluster(plan); + createSchema();
Session session; if (plan.getClientCompression() == null) { - session = createSession(nodes); + session = createSession(); } else { ProtocolOptions.Compression compression = Enum.valueOf(ProtocolOptions.Compression.class, plan.getClientCompression().toUpperCase()); - session = createSession(nodes, compression); + session = createSession(compression); }
StorageSession storageSession = new StorageSession(session); @@ -172,17 +169,16 @@ public class Simulator implements ShutdownManager { log.info("Shut down complete"); }
- private List<StorageNode> initCluster(SimulationPlan plan) { + private void initCluster(SimulationPlan plan) { try { - List<StorageNode> nodes = deployCluster(plan.getClusterConfig()); - waitForClusterToInitialize(nodes); - return nodes; + deployCluster(plan.getClusterConfig()); + waitForClusterToInitialize(); } catch (Exception e) { throw new RuntimeException("Failed to start simulator. Cluster initialization failed.", e); } }
- private List<StorageNode> deployCluster(ClusterConfig clusterConfig) throws IOException { + private void deployCluster(ClusterConfig clusterConfig) throws IOException { File clusterDir = new File(clusterConfig.getClusterDir(), "cassandra"); log.info("Deploying cluster to " + clusterDir); clusterDir.mkdirs(); @@ -200,10 +196,8 @@ public class Simulator implements ShutdownManager { deploymentOptions.load();
ccm = new CassandraClusterManager(deploymentOptions); - List<StorageNode> nodes = ccm.createCluster(); + ccm.createCluster(); ccm.startCluster(false); - - return nodes; }
private void shutdownCluster() { @@ -211,26 +205,25 @@ public class Simulator implements ShutdownManager { ccm.shutdownCluster(); }
- private void waitForClusterToInitialize(List<StorageNode> nodes) { + private void waitForClusterToInitialize() { log.info("Waiting for cluster to initialize"); ClusterInitService clusterInitService = new ClusterInitService(); - clusterInitService.waitForClusterToStart(nodes, nodes.size(), 1500, 20, 2); + clusterInitService.waitForClusterToStart(ccm.getNodes(), ccm.getJmxPorts(), ccm.getNodes().length, 20, 2, 1500); }
- private void createSchema(List<StorageNode> nodes) { + private void createSchema() { try { log.info("Creating schema"); - SchemaManager schemaManager = new SchemaManager("rhqadmin", "rhqadmin", nodes); + SchemaManager schemaManager = new SchemaManager("rhqadmin", "rhqadmin", ccm.getNodes(), ccm.getCqlPort()); schemaManager.install(); } catch (Exception e) { throw new RuntimeException("Failed to start simulator. An error occurred during schema creation.", e); } }
- private Session createSession(List<StorageNode> nodes) throws NoHostAvailableException { + private Session createSession() throws NoHostAvailableException { try { - Cluster cluster = new ClusterBuilder() - .addContactPoints(getHostNames(nodes)) + Cluster cluster = new ClusterBuilder().addContactPoints(ccm.getNodes()).withPort(ccm.getCqlPort()) .withCredentials("rhqadmin", "rhqadmin") .build();
@@ -244,13 +237,12 @@ public class Simulator implements ShutdownManager { } }
- private Session createSession(List<StorageNode> nodes, ProtocolOptions.Compression compression) + private Session createSession(ProtocolOptions.Compression compression) throws NoHostAvailableException { try { log.debug("Creating session using " + compression.name() + " compression");
- Cluster cluster = new ClusterBuilder() - .addContactPoints(getHostNames(nodes)) + Cluster cluster = new ClusterBuilder().addContactPoints(ccm.getNodes()).withPort(ccm.getCqlPort()) .withCredentials("cassandra", "cassandra") .withCompression(compression) .build(); @@ -264,6 +256,7 @@ public class Simulator implements ShutdownManager { } }
+ @SuppressWarnings("deprecation") private Session initSession(Cluster cluster) { NodeFailureListener listener = new NodeFailureListener(); for (Host host : cluster.getMetadata().getAllHosts()) { @@ -273,14 +266,6 @@ public class Simulator implements ShutdownManager { return cluster.connect("rhq"); }
- private String[] getHostNames(List<StorageNode> nodes) { - String[] hostnames = new String[nodes.size()]; - for (int i = 0; i < hostnames.length; ++i) { - hostnames[i] = nodes.get(i).getAddress(); - } - return hostnames; - } - private Set<Schedule> initSchedules(ScheduleGroup scheduleSet) { long nextCollection = System.currentTimeMillis(); Set<Schedule> schedules = new HashSet<Schedule>(); diff --git a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java index 63517e1..50f8156 100644 --- a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java +++ b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java @@ -150,9 +150,11 @@ public class StorageNodeComponentITest { storageNode.parseNodeInformation("127.0.0.1|7399|9142");
ClusterInitService clusterInitService = new ClusterInitService(); - clusterInitService.waitForClusterToStart(asList(storageNode)); + clusterInitService.waitForClusterToStart(new String[] { storageNode.getAddress() }, + new int[] { storageNode.getJmxPort() });
- SchemaManager schemaManager = new SchemaManager("rhqadmin", "rhqadmin", "127.0.0.1|7399|9142"); + SchemaManager schemaManager = new SchemaManager("rhqadmin", "rhqadmin", + new String[] { storageNode.getAddress() }, storageNode.getCqlPort()); schemaManager.install(); schemaManager.updateTopology(); }
commit f86a0777fa4cf7f3cacccca6e3eff9cbd9e49b7e Author: John Sanda jsanda@redhat.com Date: Mon Aug 19 16:14:23 2013 -0400
shortening mode name so we now have ADD_MAINTENANCE and REMOVE_MAINTENANCE
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java index e0f278d..aac8cb6 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java @@ -247,7 +247,7 @@ public class StorageNode implements Serializable { return Status.INSTALLED; } if (operationMode == OperationMode.ANNOUNCE || operationMode == OperationMode.BOOTSTRAP || - operationMode == OperationMode.ADD_NODE_MAINTENANCE) { + operationMode == OperationMode.ADD_MAINTENANCE) { if (errorMessage == null && failedOperation == null) { return Status.JOINING; } else { @@ -280,7 +280,7 @@ public class StorageNode implements Serializable { "gossip from its IP address."), BOOTSTRAP("The storage is installed but not yet part of the cluster. It is getting bootstrapped into the " + "cluster"), - ADD_NODE_MAINTENANCE("The storage node is running and is preparing to undergo routine maintenance that is " + + ADD_MAINTENANCE("The storage node is running and is preparing to undergo routine maintenance that is " + "necessary when a new node joins the cluster."), REMOVE_MAINTENANCE("The storage node is no longer part of the cluster. 
Remaining storage node are " + "undergoing cluster maintenance due to the topology change."), diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 55593d3..4d4fd17 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -246,7 +246,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN reset(); storageNodeOperationsHandler.bootstrapStorageNode(subject, storageNode); break; - case ADD_NODE_MAINTENANCE: + case ADD_MAINTENANCE: reset(); storageNodeOperationsHandler.performAddNodeMaintenance(subject, storageNode); default: @@ -272,7 +272,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN reset(); storageNodeOperationsHandler.unannounceStorageNode(subject, storageNode); break; - case ADD_NODE_MAINTENANCE: + case ADD_MAINTENANCE: case NORMAL: case DECOMMISSION: reset(); diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java index b619e72..b068734 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java @@ -34,7 +34,6 @@ import org.rhq.enterprise.server.auth.SessionManager; import org.rhq.enterprise.server.auth.SubjectManagerLocal; import org.rhq.enterprise.server.cloud.StorageNodeManagerLocal; import org.rhq.enterprise.server.operation.OperationManagerLocal; -import 
org.rhq.enterprise.server.resource.ResourceFactoryManagerLocal; import org.rhq.enterprise.server.resource.ResourceManagerLocal; import org.rhq.server.metrics.StorageSession;
@@ -193,7 +192,7 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa
@Override public void performAddNodeMaintenance(Subject subject, StorageNode storageNode) { - storageNode.setOperationMode(StorageNode.OperationMode.ADD_NODE_MAINTENANCE); + storageNode.setOperationMode(StorageNode.OperationMode.ADD_MAINTENANCE); List<StorageNode> clusterNodes = entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, StorageNode.class).setParameter("operationMode", StorageNode.OperationMode.NORMAL) .getResultList(); @@ -294,7 +293,7 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa String msg = "Aborting storage node deployment due to unexpected error while performing add node " + "maintenance."; log.error(msg, e); - storageNodeOperationsHandler.logError(StorageNode.OperationMode.ADD_NODE_MAINTENANCE, msg, e); + storageNodeOperationsHandler.logError(StorageNode.OperationMode.ADD_MAINTENANCE, msg, e); } } else if (operationHistory.getOperationDefinition().getName().equals("decommission")) { try { @@ -435,11 +434,11 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa // nothing to do here return; case CANCELED: - newStorageNode = findStorageNodeByMode(StorageNode.OperationMode.ADD_NODE_MAINTENANCE); + newStorageNode = findStorageNodeByMode(StorageNode.OperationMode.ADD_MAINTENANCE); deploymentOperationCanceled(storageNode, resourceOperationHistory, newStorageNode); return; case FAILURE: - newStorageNode = findStorageNodeByMode(StorageNode.OperationMode.ADD_NODE_MAINTENANCE); + newStorageNode = findStorageNodeByMode(StorageNode.OperationMode.ADD_MAINTENANCE); deploymentOperationFailed(storageNode, resourceOperationHistory, newStorageNode); return; default: // SUCCESS @@ -450,7 +449,7 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa if (nextNode == null) { log.info("Finished running add node maintenance on all cluster nodes"); // TODO replace this with an UPDATE statement - newStorageNode = 
findStorageNodeByMode(StorageNode.OperationMode.ADD_NODE_MAINTENANCE); + newStorageNode = findStorageNodeByMode(StorageNode.OperationMode.ADD_MAINTENANCE); newStorageNode.setOperationMode(StorageNode.OperationMode.NORMAL); } else { Configuration parameters = resourceOperationHistory.getParameters();
commit dd85d556fcf2b3e5bf34028d2607d2ccde86853f Author: John Sanda jsanda@redhat.com Date: Mon Aug 19 15:51:14 2013 -0400
prepareForBootstrap operation should continue if the storage node is already stopped
Previously the prepareForBootstrap operation would fail if the shutdown operation failed, which will happen if the node is already stopped. There is no reason to fail the prepareForBootstrap operation here because we want the node shut down anyway. And if C* bootstrapping previously failed, the storage node is likely down.
diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java index 7455f5e..2974beb 100644 --- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java +++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java @@ -290,6 +290,8 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper }
private OperationResult uninstall() { + log.info("Uninstalling storage node at " + getResourceContext().getResourceKey()); + OperationResult result = new OperationResult(); OperationResult shutdownResult = shutdownIfNecessary(); if (shutdownResult.getErrorMessage() != null) { @@ -363,14 +365,13 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper OperationResult result = new OperationResult();
log.info("Stopping storage node"); - OperationResult stopNodeResult = shutdownStorageNode(); - if (stopNodeResult.getErrorMessage() != null) { - log.error("Failed to stop storage node " + this + " Cannot prepare the node for bootstrap which means " + - "that the storage node cannot join the cluster. Make sure the storage node is not running and retry " + - "the operation"); - result.setErrorMessage("Failed to stop storage node. Cannot prepare the node for bootstrap which means " + - "that it cannot join the cluster. Make sure that the node is not running and retry the operation. " + - "Stopping the storage node failed with this error: " + stopNodeResult.getErrorMessage()); + OperationResult shutdownResult = shutdownIfNecessary(); + if (shutdownResult.getErrorMessage() != null) { + log.error("Failed to stop storage node " + getResourceContext().getResourceKey() + ". The storage node " + + "must be shut down in order for the changes made by this operation to take effect."); + result.setErrorMessage("Failed to stop the storage node. The storage node must be shut down in order " + + "for the changes made by this operation to take effect. The attempt to stop shut down the storage " + + "node failed with this error: " + shutdownResult.getErrorMessage()); return result; }
commit dc8fe178b0b24d756dc1fdddeafceb8ea9dc0ea0 Author: John Sanda jsanda@redhat.com Date: Mon Aug 19 15:25:55 2013 -0400
fixing typo in api change justification
diff --git a/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml b/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml index ea3b340..b5a4357 100644 --- a/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml +++ b/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml @@ -74,7 +74,7 @@ <difference> <className>org/rhq/enterprise/server/cloud/StorageNodeManagerRemote</className> <differenceType>7012</differenceType> <!-- method added to an interface --> - <method>void unDeployStorageNode(org.rhq.core.domain.auth.Subject, org.rhq.core.domain.cloud.StorageNode)</method> + <method>void undeployStorageNode(org.rhq.core.domain.auth.Subject, org.rhq.core.domain.cloud.StorageNode)</method> <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> </difference>
commit 33597f038d10783c15ff62727b9c2d8a17570633 Author: John Sanda jsanda@redhat.com Date: Mon Aug 19 15:20:04 2013 -0400
refactor common operation scheduling code into a util method
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java index f2fc108..b619e72 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java @@ -103,16 +103,11 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa if (log.isInfoEnabled()) { log.info("Announcing " + newStorageNode + " to cluster node " + clusterNode); } - ResourceOperationSchedule schedule = new ResourceOperationSchedule(); - schedule.setResource(clusterNode.getResource()); - schedule.setJobTrigger(JobTrigger.createNowTrigger()); - schedule.setSubject(subject); - schedule.setOperationName("announce"); + Configuration parameters = new Configuration(); parameters.put(addresses); - schedule.setParameters(parameters);
- operationManager.scheduleResourceOperation(subject, schedule); + scheduleOperation(subject, clusterNode, parameters, "announce"); }
@Override @@ -129,16 +124,10 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa }
private void unannounceStorageNode(Subject subject, StorageNode clusterNode, PropertyList addresses) { - ResourceOperationSchedule schedule = new ResourceOperationSchedule(); - schedule.setResource(clusterNode.getResource()); - schedule.setJobTrigger(JobTrigger.createNowTrigger()); - schedule.setSubject(subject); - schedule.setOperationName("unannounce"); Configuration parameters = new Configuration(); parameters.put(addresses); - schedule.setParameters(parameters);
- operationManager.scheduleResourceOperation(subject, schedule); + scheduleOperation(subject, clusterNode, parameters, "unannounce"); }
@Override @@ -150,15 +139,7 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa if (storageNode.getResource() == null) { finishUninstall(subject, storageNode); } else { - ResourceOperationSchedule schedule = new ResourceOperationSchedule(); - schedule.setResource(storageNode.getResource()); - schedule.setJobTrigger(JobTrigger.createNowTrigger()); - schedule.setSubject(subject); - schedule.setOperationName("uninstall"); - Configuration parameters = new Configuration(); - schedule.setParameters(parameters); - - operationManager.scheduleResourceOperation(subject, schedule); + scheduleOperation(subject, storageNode, new Configuration(), "uninstall"); } }
@@ -183,14 +164,7 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa // queue up storage nodes during cluster maintenance operations. storageNode.setMaintenancePending(runRepair);
- ResourceOperationSchedule schedule = new ResourceOperationSchedule(); - schedule.setOperationName("decommission"); - schedule.setResource(storageNode.getResource()); - schedule.setJobTrigger(JobTrigger.createNowTrigger()); - schedule.setSubject(subject); - schedule.setParameters(new Configuration()); - - operationManager.scheduleResourceOperation(subject, schedule); + scheduleOperation(subject, storageNode, new Configuration(), "decommission"); }
@Override @@ -238,21 +212,12 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa if (log.isInfoEnabled()) { log.info("Running addNodeMaintenance for storage node " + storageNode); } + Configuration params = new Configuration(); + params.put(seedsList); + params.put(new PropertySimple(RUN_REPAIR_PROPERTY, runRepair)); + params.put(new PropertySimple(UPDATE_SEEDS_LIST, Boolean.TRUE));
- ResourceOperationSchedule schedule = new ResourceOperationSchedule(); - schedule.setResource(storageNode.getResource()); - schedule.setJobTrigger(JobTrigger.createNowTrigger()); - schedule.setSubject(subject); - schedule.setOperationName("addNodeMaintenance"); - - Configuration config = new Configuration(); - config.put(seedsList); - config.put(new PropertySimple(RUN_REPAIR_PROPERTY, runRepair)); - config.put(new PropertySimple(UPDATE_SEEDS_LIST, Boolean.TRUE)); - - schedule.setParameters(config); - - operationManager.scheduleResourceOperation(subject, schedule); + scheduleOperation(subject, storageNode, params, "addNodeMaintenance"); }
@Override @@ -286,21 +251,12 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa if (log.isInfoEnabled()) { log.info("Running remove node maintenance for storage node " + storageNode); } + Configuration params = new Configuration(); + params.put(seedsList); + params.put(new PropertySimple(RUN_REPAIR_PROPERTY, runRepair)); + params.put(new PropertySimple(UPDATE_SEEDS_LIST, true));
- ResourceOperationSchedule schedule = new ResourceOperationSchedule(); - schedule.setResource(storageNode.getResource()); - schedule.setJobTrigger(JobTrigger.createNowTrigger()); - schedule.setSubject(subject); - schedule.setOperationName("removeNodeMaintenance"); - - Configuration config = new Configuration(); - config.put(seedsList); - config.put(new PropertySimple(RUN_REPAIR_PROPERTY, runRepair)); - config.put(new PropertySimple(UPDATE_SEEDS_LIST, Boolean.TRUE)); - - schedule.setParameters(config); - - operationManager.scheduleResourceOperation(subject, schedule); + scheduleOperation(subject, storageNode, params, "removeNodeMaintenance"); }
@Override @@ -578,7 +534,7 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa break; default: // SUCCESS log.info("Successfully uninstalled " + storageNode + " from disk"); - uninstall(getSubject(operationHistory), storageNode); + finishUninstall(getSubject(operationHistory), storageNode); } }
@@ -701,22 +657,13 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa if (log.isInfoEnabled()) { log.info("Preparing to bootstrap " + storageNode + " into cluster..."); } - - ResourceOperationSchedule schedule = new ResourceOperationSchedule(); - schedule.setResource(storageNode.getResource()); - schedule.setJobTrigger(JobTrigger.createNowTrigger()); - schedule.setSubject(subject); - schedule.setOperationName("prepareForBootstrap"); - StorageClusterSettings clusterSettings = storageClusterSettingsManager.getClusterSettings(subject); Configuration parameters = new Configuration(); parameters.put(new PropertySimple("cqlPort", clusterSettings.getCqlPort())); parameters.put(new PropertySimple("gossipPort", clusterSettings.getGossipPort())); parameters.put(addresses);
- schedule.setParameters(parameters); - - operationManager.scheduleResourceOperation(subject, schedule); + scheduleOperation(subject, storageNode, parameters, "prepareForBootstrap"); }
private StorageNode takeFromMaintenanceQueue() { @@ -832,6 +779,19 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa return value; }
+ private void scheduleOperation(Subject subject, StorageNode storageNode, Configuration parameters, + String operation) { + ResourceOperationSchedule schedule = new ResourceOperationSchedule(); + schedule.setResource(storageNode.getResource()); + schedule.setJobTrigger(JobTrigger.createNowTrigger()); + schedule.setSubject(subject); + schedule.setOperationName(operation); + schedule.setParameters(parameters); + + operationManager.scheduleResourceOperation(subject, schedule); + + } + private PropertyList createPropertyListOfAddresses(String propertyName, List<StorageNode> nodes) { PropertyList list = new PropertyList(propertyName); for (StorageNode storageNode : nodes) {
commit 50197e1e89d1382dce4fd7e9f703920fd239aec1 Author: John Sanda jsanda@redhat.com Date: Mon Aug 19 14:33:08 2013 -0400
fixing API check for new undeploy method in remote API
diff --git a/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml b/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml index 58bd618..ea3b340 100644 --- a/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml +++ b/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml @@ -72,6 +72,13 @@ </difference>
<difference> + <className>org/rhq/enterprise/server/cloud/StorageNodeManagerRemote</className> + <differenceType>7012</differenceType> <!-- method added to an interface --> + <method>void unDeployStorageNode(org.rhq.core.domain.auth.Subject, org.rhq.core.domain.cloud.StorageNode)</method> + <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> + </difference> + + <difference> <className>org/rhq/enterprise/server/bundle/BundleManagerRemote</className> <differenceType>7012</differenceType> <!-- method added to an interface --> <method>void assignBundlesToBundleGroup(org.rhq.core.domain.auth.Subject, int, int[])</method>
commit 6ef8696c1d2c46571efcf638b0416d06fe3cffc5 Author: Jirka Kremser jkremser@redhat.com Date: Mon Aug 19 19:41:08 2013 +0200
Adding support for storage node (un)deployment in coregui.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java index d1ea625..c49f697 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java @@ -124,7 +124,7 @@ public class StorageNodeAdminView extends EnhancedVLayout implements Bookmarkabl public void onFailure(Throwable caught) { Message message = new Message("Unable to render storage node alert view: " + caught.getMessage(), Message.Severity.Warning); - CoreGUI.goToView(VIEW_ID.getName(), message); + CoreGUI.goToView(StorageNodeTableView.VIEW_PATH, message); }
@Override diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java index 685fb5d..9ee4f28 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java @@ -31,6 +31,7 @@ import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDat import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_MTIME; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_OPERATION_MODE; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_RESOURCE_ID; +import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_STATUS;
import java.util.ArrayList; import java.util.List; @@ -63,7 +64,6 @@ import org.rhq.enterprise.gui.coregui.client.LinkManager; import org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.StorageNodeLoadCompositeDatasourceField; import org.rhq.enterprise.gui.coregui.client.components.table.TimestampCellFormatter; import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; -import org.rhq.enterprise.gui.coregui.client.inventory.resource.AncestryUtil; import org.rhq.enterprise.gui.coregui.client.util.Log; import org.rhq.enterprise.gui.coregui.client.util.MeasurementConverterClient; import org.rhq.enterprise.gui.coregui.client.util.RPCDataSource; @@ -144,7 +144,7 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNodeLoadComposit // cqlField.setHidden(true); // fields.add(cqlField);
- field = FIELD_OPERATION_MODE.getListGridField("90"); + field = FIELD_STATUS.getListGridField("90"); field.setCellFormatter(new CellFormatter() { public String format(Object value, ListGridRecord listGridRecord, int i, int i1) { if (listGridRecord.getAttribute(FIELD_ERROR_MESSAGE.propertyName()) != null @@ -154,6 +154,7 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNodeLoadComposit return value.toString(); } }); + field.setShowHover(true); field.setHoverCustomizer(new HoverCustomizer() { public String hoverHTML(Object value, ListGridRecord record, int rowNum, int colNum) { @@ -235,6 +236,7 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNodeLoadComposit record.setAttribute(FIELD_JMX_PORT.propertyName(), node.getJmxPort()); record.setAttribute(FIELD_CQL_PORT.propertyName(), node.getCqlPort()); record.setAttribute(FIELD_OPERATION_MODE.propertyName(), node.getOperationMode()); + record.setAttribute(FIELD_STATUS.propertyName(), node.getStatus()); record.setAttribute(FIELD_ERROR_MESSAGE.propertyName(), node.getErrorMessage()); if (node.getFailedOperation() != null && node.getFailedOperation().getResource() != null) { ResourceOperationHistory operationHistory = node.getFailedOperation(); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasourceField.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasourceField.java index 04a1767..ada97fa 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasourceField.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasourceField.java @@ -40,6 +40,8 @@ public enum StorageNodeDatasourceField {
FIELD_OPERATION_MODE("operationMode", CoreGUI.getMessages().view_adminTopology_server_mode()),
+ FIELD_STATUS("status", "Status"), + FIELD_MEMORY("memory", "Memory"),
FIELD_DISK("disk", "Disk"), diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java index 4055e6f..f5de561 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java @@ -77,6 +77,9 @@ import org.rhq.enterprise.gui.coregui.client.util.message.Message; public class StorageNodeDetailView extends EnhancedVLayout implements BookmarkableView {
private final int storageNodeId; + +// String path = StorageNodeAdminView.VIEW_PATH + "/" + storageNodeId; +// CoreGUI.goToView(path, message);
private static final int SECTION_COUNT = 3; private final SectionStack sectionStack; @@ -121,9 +124,7 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab new AsyncCallback<PageList<StorageNode>>() { public void onSuccess(final PageList<StorageNode> storageNodes) { if (storageNodes == null || storageNodes.isEmpty() || storageNodes.size() != 1) { - CoreGUI.getErrorHandler().handleError( - MSG.view_adminTopology_message_fetchServerFail(String.valueOf(storageNodeId))); - initSectionCount = SECTION_COUNT; + onFailure(new Exception("No storage nodes have been found.")); } final StorageNode node = storageNodes.get(0); header.setContents("<div style='text-align: center; font-weight: bold; font-size: medium;'> Storage Node (" @@ -166,6 +167,7 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab Message message = new Message(MSG.view_configurationHistoryDetails_error_loadFailure(), Message.Severity.Warning); initSectionCount = SECTION_COUNT; + CoreGUI.getMessageCenter().notify(message); }
@Override @@ -199,7 +201,10 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab new AsyncCallback<Map<String, List<MeasurementDataNumericHighLowComposite>>>() { @Override public void onFailure(Throwable caught) { - + Message message = new Message("Unable to fetch storage node load data.", + Message.Severity.Warning); + initSectionCount = SECTION_COUNT; + CoreGUI.getMessageCenter().notify(message); }
@Override @@ -357,7 +362,6 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab ResourceOperationHistory operationHistory = storageNode.getFailedOperation(); String value = LinkManager.getSubsystemResourceOperationHistoryLink(operationHistory.getResource().getId(), operationHistory.getId()); - // String value = "#Resource/" + operationHistory.getResource().getId() + "/Operations/History/" + operationHistory.getId()); lastOperation = new StaticTextItem("lastOp", "Operation"); lastOperation.setValue(LinkManager.getHref(value, operationHistory.getOperationDefinition() .getDisplayName())); @@ -389,7 +393,6 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab loadLayout.setWidth100(); LayoutSpacer spacer = new LayoutSpacer(); spacer.setHeight(10); -// HTMLFlow loadLabel = new HTMLFlow("<span style='font-weight:bold'>Status</span>"); HTMLFlow loadLabel = new HTMLFlow("Status"); loadLabel.addStyleName("formTitle"); loadLabel.setTooltip("Contains selected metrics collected for last 8 hours."); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java index 84c1586..50cb614 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java @@ -20,7 +20,7 @@ package org.rhq.enterprise.gui.coregui.client.admin.storage;
import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_ADDRESS; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_ALERTS; -import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_RESOURCE_ID; +import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.*;
import java.util.ArrayList; import java.util.Arrays; @@ -42,6 +42,7 @@ import com.smartgwt.client.widgets.grid.ListGridField; import com.smartgwt.client.widgets.grid.ListGridRecord;
import org.rhq.core.domain.authz.Permission; +import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.domain.cloud.StorageNode.OperationMode; import org.rhq.enterprise.gui.coregui.client.CoreGUI; import org.rhq.enterprise.gui.coregui.client.LinkManager; @@ -85,7 +86,7 @@ public class StorageNodeTableView extends TableSection<StorageNodeDatasource> { protected void doOnDraw() { super.doOnDraw(); // commenting out this call, because it caused UI to freeze -// scheduleUnacknowledgedAlertsPollingJob(getListGrid()); + // scheduleUnacknowledgedAlertsPollingJob(getListGrid()); }
@Override @@ -204,6 +205,80 @@ public class StorageNodeTableView extends TableSection<StorageNodeDatasource> {
private void showCommonActions() { addInvokeOperationsAction(); + addDeployAction(); + addUndeployAction(); + } + + private void addUndeployAction() { + final ParametrizedMessage question = new ParametrizedMessage() { + @Override + public String getMessage(String... param) { + return "Are you sure, you want to run the undeploy operation on selected nodes: " + param[0] + + " ? It may take a while to complete."; + } + }; + final ParametrizedMessage success = new ParametrizedMessage() { + @Override + public String getMessage(String... param) { + return "Starting the undeploy operation on storage nodes " + param[0]; + } + }; + final ParametrizedMessage failure = new ParametrizedMessage() { + @Override + public String getMessage(String... param) { + return "Invoking the undeploy operation failed for storage nodes " + param[0] + " ids: " + param[1]; + } + }; + + addTableAction("Undeploy Selected", null, new AuthorizedTableAction(this, TableActionEnablement.SINGLE, + Permission.MANAGE_SETTINGS) { + + @Override + public boolean isEnabled(ListGridRecord[] selection) { + return StorageNodeTableView.this.isUndeployable(super.isEnabled(selection), selection); + } + + @Override + public void executeAction(final ListGridRecord[] selections, Object actionValue) { + executeBulkAction(selections, actionValue, question, success, failure, StorageNodeOperation.UNDEPLOY); + } + }); + } + + private void addDeployAction() { + final ParametrizedMessage question = new ParametrizedMessage() { + @Override + public String getMessage(String... param) { + return "Are you sure, you want to run the deploy operation on selected nodes: " + param[0] + + " ? It may take a while to complete."; + } + }; + final ParametrizedMessage success = new ParametrizedMessage() { + @Override + public String getMessage(String... 
param) { + return "Starting the deploy operation on storage nodes " + param[0]; + } + }; + final ParametrizedMessage failure = new ParametrizedMessage() { + @Override + public String getMessage(String... param) { + return "Invoking the deploy operation failed for storage nodes " + param[0] + " ids: " + param[1]; + } + }; + + addTableAction("Deploy Selected", null, new AuthorizedTableAction(this, TableActionEnablement.SINGLE, + Permission.MANAGE_SETTINGS) { + + @Override + public boolean isEnabled(ListGridRecord[] selection) { + return StorageNodeTableView.this.isDeployable(super.isEnabled(selection), selection); + } + + @Override + public void executeAction(final ListGridRecord[] selections, Object actionValue) { + executeBulkAction(selections, actionValue, question, success, failure, StorageNodeOperation.DEPLOY); + } + }); }
private void addInvokeOperationsAction() { @@ -213,7 +288,6 @@ public class StorageNodeTableView extends TableSection<StorageNodeDatasource> { operationsMap.put("Restart", "restart"); operationsMap.put("Disable Debug Mode", "stopRPCServer"); operationsMap.put("Enable Debug Mode", "startRPCServer"); - // operationsMap.put("Decommission", "decommission");
addTableAction(MSG.common_title_operation(), null, operationsMap, new AuthorizedTableAction(this, TableActionEnablement.ANY, Permission.MANAGE_SETTINGS) { @@ -221,77 +295,129 @@ public class StorageNodeTableView extends TableSection<StorageNodeDatasource> { @Override public boolean isEnabled(ListGridRecord[] selection) { return StorageNodeTableView.this.isEnabled(super.isEnabled(selection), selection); - }; + }
@Override public void executeAction(final ListGridRecord[] selections, Object actionValue) { - final String operationName = (String) actionValue; - final List<String> selectedAddresses = getSelectedAddresses(selections); - // String message = MSG.view_adminTopology_message_setModeConfirm(selectedAddresses.toString(), mode.name()); - SC.ask("Are you sure, you want to run operation " + operationName + "?", new BooleanCallback() { - public void execute(Boolean confirmed) { - if (confirmed) { - final CountDownLatch latch = CountDownLatch.create(selections.length, new Command() { - @Override - public void execute() { - // Message msg = new Message(MSG.view_adminTopology_message_setMode( - // String.valueOf(selections.length), mode.name()), Message.Severity.Info); - Message msg = new Message("Operation" + operationName - + " was successfully scheduled for resources with ids" - + Arrays.asList(getSelectedIds(selections)), Message.Severity.Info); - CoreGUI.getMessageCenter().notify(msg); - refreshTableInfo(); - } - }); - boolean isStopStartOrRestart = Arrays.asList("start", "shutdown", "restart").contains( - operationName); - for (ListGridRecord storageNodeRecord : selections) { - // NFE should never happen, because of the condition for table action enablement - int resourceId = storageNodeRecord.getAttributeAsInt(FIELD_RESOURCE_ID.propertyName()); - if (isStopStartOrRestart) { - // start, stop or restart the storage node - GWTServiceLookup.getOperationService().scheduleResourceOperation(resourceId, - operationName, null, "Run by Storage Node Administrations UI", 0, - new AsyncCallback<Void>() { - public void onSuccess(Void result) { - latch.countDown(); - } - - public void onFailure(Throwable caught) { - CoreGUI.getErrorHandler().handleError( - "Scheduling operation " + operationName - + " failed for resources with ids" - + Arrays.asList(getSelectedIds(selections)) + " " - + caught.getMessage(), caught); - latch.countDown(); - refreshTableInfo(); - } - }); + 
ParametrizedMessage question = new ParametrizedMessage() { + @Override + public String getMessage(String... param) { + return "Are you sure, you want to run operation " + param[0] + "?"; + } + }; + ParametrizedMessage success = new ParametrizedMessage() { + @Override + public String getMessage(String... param) { + return "Operation" + param[0] + " was successfully scheduled for storage nodes " + param[1]; + } + }; + ParametrizedMessage failure = new ParametrizedMessage() { + @Override + public String getMessage(String... param) { + return "Scheduling operation " + param[0] + " failed for storage nodes " + param[1]; + } + }; + executeBulkAction(selections, actionValue, question, success, failure, StorageNodeOperation.OTHER); + } + }); + } + + private enum StorageNodeOperation { + DEPLOY, UNDEPLOY, OTHER + } + + private interface ParametrizedMessage { + String getMessage(String... param); + } + + private void executeBulkAction(final ListGridRecord[] selections, Object actionValue, ParametrizedMessage question, + final ParametrizedMessage success, final ParametrizedMessage failure, final StorageNodeOperation operationType) { + final String operationName = (String) actionValue; + final List<String> selectedAddresses = getSelectedAddresses(selections); + SC.ask(question.getMessage(selectedAddresses.toString()), new BooleanCallback() { + public void execute(Boolean confirmed) { + if (confirmed) { + final CountDownLatch latch = CountDownLatch.create(selections.length, new Command() { + @Override + public void execute() { + String msgString = null; + if (operationType == StorageNodeOperation.OTHER) { + msgString = success.getMessage(operationName, selectedAddresses.toString()); + } else { + msgString = success.getMessage(selectedAddresses.toString()); + } + Message msg = new Message(msgString, Message.Severity.Info); + CoreGUI.getMessageCenter().notify(msg); + refreshTableInfo(); + } + }); + boolean isStopStartOrRestart = Arrays.asList("start", "shutdown", "restart") + 
.contains(operationName); + for (ListGridRecord storageNodeRecord : selections) { + // NFE should never happen, because of the condition for table action enablement + int resourceId = storageNodeRecord.getAttributeAsInt(FIELD_RESOURCE_ID.propertyName()); + if (isStopStartOrRestart) { + // start, stop or restart the storage node + GWTServiceLookup.getOperationService().scheduleResourceOperation(resourceId, operationName, + null, "Run by Storage Node Administrations UI", 0, new AsyncCallback<Void>() { + public void onSuccess(Void result) { + latch.countDown(); + } + + public void onFailure(Throwable caught) { + String msg = failure.getMessage(operationName, + selectedAddresses + " " + caught.getMessage()); + CoreGUI.getErrorHandler().handleError(msg, caught); + latch.countDown(); + refreshTableInfo(); + } + }); + } else { + if (operationType != StorageNodeOperation.OTHER) { // (un)deploy + AsyncCallback<Void> callback = new AsyncCallback<Void>() { + public void onSuccess(Void result) { + latch.countDown(); + } + + public void onFailure(Throwable caught) { + String msg = failure.getMessage( + selectedAddresses.toString(), + Arrays.asList(getSelectedIds(selections)).toString() + " " + + caught.getMessage()); + CoreGUI.getErrorHandler().handleError(msg, caught); + latch.countDown(); + refreshTableInfo(); + } + }; + int storageNodeId = storageNodeRecord.getAttributeAsInt("id"); + StorageNode node = new StorageNode(storageNodeId); + if (operationType == StorageNodeOperation.DEPLOY) { + GWTServiceLookup.getStorageService().deployStorageNode(node, callback); } else { - // invoke the operation on the storage service resource - GWTServiceLookup.getStorageService().invokeOperationOnStorageService(resourceId, - operationName, new AsyncCallback<Void>() { - public void onSuccess(Void result) { - latch.countDown(); - } - - public void onFailure(Throwable caught) { - CoreGUI.getErrorHandler().handleError( - "Scheduling operation " + operationName - + " failed for resources with 
ids" - + Arrays.asList(getSelectedIds(selections)) + " " - + caught.getMessage(), caught); - latch.countDown(); - refreshTableInfo(); - } - }); + GWTServiceLookup.getStorageService().undeployStorageNode(node, callback); } + } else { + // invoke the operation on the storage service resource + GWTServiceLookup.getStorageService().invokeOperationOnStorageService(resourceId, + operationName, new AsyncCallback<Void>() { + public void onSuccess(Void result) { + latch.countDown(); + } + + public void onFailure(Throwable caught) { + String msg = failure.getMessage(operationName, selectedAddresses + " " + + caught.getMessage()); + CoreGUI.getErrorHandler().handleError(msg, caught); + latch.countDown(); + refreshTableInfo(); + } + }); } - } else { - refreshTableInfo(); } } - }); + } else { + refreshTableInfo(); + } } }); } @@ -331,6 +457,33 @@ public class StorageNodeTableView extends TableSection<StorageNodeDatasource> { return true; }
+ private boolean isDeployable(boolean parentsOpinion, ListGridRecord[] selection) { + if (!parentsOpinion || !isEnabled(parentsOpinion, selection)) { + return false; + } + for (ListGridRecord storageNodeRecord : selection) { + if ("NORMAL".equals(storageNodeRecord.getAttributeAsString(FIELD_STATUS.propertyName())) + || "JOINING".equals(storageNodeRecord.getAttributeAsString(FIELD_STATUS.propertyName())) + || "LEAVING".equals(storageNodeRecord.getAttributeAsString(FIELD_STATUS.propertyName()))) { + return false; + } + } + return true; + } + + private boolean isUndeployable(boolean parentsOpinion, ListGridRecord[] selection) { + if (!parentsOpinion || !isEnabled(parentsOpinion, selection)) { + return false; + } + for (ListGridRecord storageNodeRecord : selection) { + if ("JOINING".equals(storageNodeRecord.getAttributeAsString(FIELD_STATUS.propertyName())) + || "LEAVING".equals(storageNodeRecord.getAttributeAsString(FIELD_STATUS.propertyName()))) { + return false; + } + } + return true; + } + @Override protected String getBasePath() { return VIEW_PATH; diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java index 72f17b18..9470302 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java @@ -27,6 +27,7 @@ import java.util.Map;
import com.google.gwt.user.client.rpc.RemoteService;
+import org.rhq.core.domain.auth.Subject; import org.rhq.core.domain.cloud.StorageClusterSettings; import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.domain.cloud.StorageNodeConfigurationComposite; @@ -92,4 +93,8 @@ public interface StorageGWTService extends RemoteService { StorageClusterSettings retrieveClusterSettings() throws RuntimeException;
void updateClusterSettings(StorageClusterSettings clusterSettings) throws RuntimeException; + + void undeployStorageNode(StorageNode storageNode) throws RuntimeException; + + void deployStorageNode(StorageNode storageNode) throws RuntimeException; } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java index ae18075..5548285 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java @@ -26,6 +26,7 @@ import java.util.ArrayList; import java.util.List; import java.util.Map;
+import org.rhq.core.domain.auth.Subject; import org.rhq.core.domain.cloud.StorageClusterSettings; import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.domain.cloud.StorageNodeConfigurationComposite; @@ -201,5 +202,23 @@ public class StorageGWTServiceImpl extends AbstractGWTServiceImpl implements Sto } catch (Throwable t) { throw getExceptionToThrowToClient(t); } - } + } + + @Override + public void undeployStorageNode(StorageNode storageNode) throws RuntimeException { + try { + storageNodeManager.undeployStorageNode(getSessionSubject(), storageNode); + } catch (Throwable t) { + throw getExceptionToThrowToClient(t); + } + } + + @Override + public void deployStorageNode(StorageNode storageNode) throws RuntimeException { + try { + storageNodeManager.deployStorageNode(getSessionSubject(), storageNode); + } catch (Throwable t) { + throw getExceptionToThrowToClient(t); + } + } }
commit da1bc7863207dde3ae2cc51811b3cbfae26761b4 Author: Lukas Krejci lkrejci@redhat.com Date: Mon Aug 19 17:52:53 2013 +0200
[BZ 986491] - Yum content source plugin now handles HTTP basic auth
It also should be able to handle HTTPS and other URL schemes if support for them is available in the RHQ server's JVM.
diff --git a/modules/enterprise/server/plugins/yum/pom.xml b/modules/enterprise/server/plugins/yum/pom.xml index e52c04b..bef9d81 100644 --- a/modules/enterprise/server/plugins/yum/pom.xml +++ b/modules/enterprise/server/plugins/yum/pom.xml @@ -32,6 +32,20 @@ <scope>provided</scope> <!-- this version of jdom is included in the server, we'll juse reuse it --> </dependency>
+ <!-- Test deps --> + <dependency> + <groupId>org.rhq</groupId> + <artifactId>test-utils</artifactId> + <version>${project.version}</version> + <scope>test</scope> + </dependency> + + <dependency> + <groupId>org.jboss.resteasy</groupId> + <artifactId>tjws</artifactId> + <version>3.0.3.Final</version> + <scope>test</scope> + </dependency> </dependencies>
<build> @@ -128,4 +142,4 @@ </profile> </profiles>
-</project> \ No newline at end of file +</project> diff --git a/modules/enterprise/server/plugins/yum/src/main/java/org/rhq/enterprise/server/plugins/yum/DiskReader.java b/modules/enterprise/server/plugins/yum/src/main/java/org/rhq/enterprise/server/plugins/yum/DiskReader.java index f131471..6a2227f 100644 --- a/modules/enterprise/server/plugins/yum/src/main/java/org/rhq/enterprise/server/plugins/yum/DiskReader.java +++ b/modules/enterprise/server/plugins/yum/src/main/java/org/rhq/enterprise/server/plugins/yum/DiskReader.java @@ -1,6 +1,6 @@ /* * RHQ Management Platform - * Copyright (C) 2005-2008 Red Hat, Inc. + * Copyright (C) 2013 Red Hat, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify @@ -16,13 +16,13 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ + package org.rhq.enterprise.server.plugins.yum;
import java.io.File; -import java.io.FileInputStream; import java.io.IOException; -import java.io.InputStream; -import java.util.zip.GZIPInputStream; +import java.net.URISyntaxException; +import java.net.URL;
/** * The disk reader is a yum repo reader used to read metadata and bits from an existing yum repo that is located on a @@ -30,56 +30,24 @@ import java.util.zip.GZIPInputStream; * * @author jortel */ -public class DiskReader implements RepoReader { - /** - * The base or root directory path of a yum repo. - */ - private final String basepath; +public class DiskReader extends UrlReader {
- /** - * Constructor. - * - * @param basepath The base or root directory path of a yum repo. - */ - public DiskReader(String basepath) { - this.basepath = basepath; + public DiskReader(URL baseUrl) { + super(baseUrl); }
/** * Validate the reader. Validates that the base path is an existing directory that is readable. * - * @throws Exception When <i>basepath</i> is not a directory, does not exist, or is not readable. + * @throws IOException When <i>baseUrl</i> is not a directory, does not exist, or is not readable. */ - public void validate() throws Exception { - File file = new File(basepath); - if (file.exists() || file.canRead() || file.isDirectory()) { + @Override + public void validate() throws IOException, URISyntaxException { + File file = new File(baseUrl.toURI().getSchemeSpecificPart()); + if (file.exists() && file.canRead() && file.isDirectory()) { return; // good }
- throw new Exception("Path: '" + basepath + "' not found, not a directory or permission denied"); - } - - /** - * Open an input stream to specifed relative path. Prepends the basepath to the <i>path</i> and opens and opens and - * input stream. - * - * @param path A relative path to a file within the repo. - * - * @return An open input stream that <b>must</b> be closed by the caller. - * - * @throws IOException On all errors. - */ - public InputStream openStream(String path) throws IOException { - InputStream in = new FileInputStream(basepath + "/" + path); - if (path.endsWith(".gz")) { - return new GZIPInputStream(in); - } - - return in; - } - - @Override - public String toString() { - return "basepath: " + basepath; + throw new IOException("Path: '" + baseUrl + "' not found, not a directory or permission denied"); } -} \ No newline at end of file +} diff --git a/modules/enterprise/server/plugins/yum/src/main/java/org/rhq/enterprise/server/plugins/yum/HttpReader.java b/modules/enterprise/server/plugins/yum/src/main/java/org/rhq/enterprise/server/plugins/yum/HttpReader.java index 98e115c..99eecdd 100644 --- a/modules/enterprise/server/plugins/yum/src/main/java/org/rhq/enterprise/server/plugins/yum/HttpReader.java +++ b/modules/enterprise/server/plugins/yum/src/main/java/org/rhq/enterprise/server/plugins/yum/HttpReader.java @@ -21,80 +21,58 @@ package org.rhq.enterprise.server.plugins.yum; import java.io.IOException; import java.io.InputStream; import java.net.HttpURLConnection; +import java.net.URISyntaxException; import java.net.URL; import java.util.zip.GZIPInputStream;
+import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import org.rhq.core.util.Base64; + /** * The http reader is a yum repo reader used to read metadata and bits from an existing (remote) yum repo using yum's * native http interface. * * @author jortel */ -public class HttpReader implements RepoReader { - /** - * The base url of a yum repo. - */ - private final String baseurl; +public class HttpReader extends UrlReader {
- /** - * The current url connection - */ - HttpURLConnection connection; + private static final Log LOG = LogFactory.getLog(RepoProvider.class); + + private final String username; + private final String password;
/** * Constructor. * - * @param basepath The base url of a yum repo. + * @param baseUrl The base url of a yum repo. + * @param username the name of the user to authenticate with or null + * @param password the password to use or null */ - public HttpReader(String baseurl) { - this.baseurl = baseurl; + public HttpReader(URL baseUrl, String username, String password) { + super(baseUrl); + this.username = username; + this.password = password; }
- /** - * Validate the reader. Validates that the base url is valid. - * - * @throws Exception When <i>baseurl</i> is not valid. - */ - public void validate() throws Exception { - URL url = new URL(baseurl); - connection = (HttpURLConnection) url.openConnection(); - connection.setRequestMethod("GET"); - try { - if (connection.getHeaderField(0) == null) { - throw new IOException("Cannot validate connection - check URL"); - } - } finally { - connection.disconnect(); - } - } - - /** - * Open an input stream to specifed relative url. Prepends the baseurl to the <i>url</i> and opens and opens and - * input stream. Files with a .gz suffix will be unziped (inline). - * - * @param suffix A url that is relative to the <i>baseurl</i> and references a file within the repo. - * - * @return An open input stream that <b>must</b> be closed by the caller. - * - * @throws IOException On all errors. - */ - public InputStream openStream(String suffix) throws IOException { - URL url = new URL(baseurl + "/" + suffix); - connection = (HttpURLConnection) url.openConnection(); - connection.setRequestMethod("GET"); - InputStream in = connection.getInputStream(); - if (suffix.endsWith(".gz")) { - return new GZIPInputStream(in); + @Override + protected InputStream doOpen(URL url) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("open " + url); }
- return in; - } + HttpURLConnection connection = (HttpURLConnection) url.openConnection(); + connection.setInstanceFollowRedirects(true);
- /* - * (non-Javadoc) @see java.lang.Object#toString() - */ - @Override - public String toString() { - return baseurl; + if (username != null) { + String userInfo = username; + if (password != null) { + userInfo += ":" + password; + } + String basicAuth = "Basic " + Base64.encode(userInfo.getBytes("ISO-8859-1")); + connection.setRequestProperty("Authorization", basicAuth); + } + return connection.getInputStream(); } -} \ No newline at end of file +} diff --git a/modules/enterprise/server/plugins/yum/src/main/java/org/rhq/enterprise/server/plugins/yum/RepoProvider.java b/modules/enterprise/server/plugins/yum/src/main/java/org/rhq/enterprise/server/plugins/yum/RepoProvider.java index f065ba7..2e04453 100644 --- a/modules/enterprise/server/plugins/yum/src/main/java/org/rhq/enterprise/server/plugins/yum/RepoProvider.java +++ b/modules/enterprise/server/plugins/yum/src/main/java/org/rhq/enterprise/server/plugins/yum/RepoProvider.java @@ -19,6 +19,9 @@ package org.rhq.enterprise.server.plugins.yum;
import java.io.InputStream; +import java.net.MalformedURLException; +import java.net.URI; +import java.net.URL; import java.util.ArrayList; import java.util.Collection; import java.util.List; @@ -84,20 +87,19 @@ public class RepoProvider implements ContentProvider, PackageSource { throw new IllegalArgumentException("Missing required 'location' property"); }
- location = trim(location); - log.info("Initialized with location: " + location); - if (location.startsWith("http://")) { - reader = new HttpReader(location); - return; - } + location = location.trim(); + String username = configuration.getSimpleValue("username"); + String password = configuration.getSimpleValue("password");
- if (location.startsWith("file://")) { - location = location.substring(7); - reader = new DiskReader(location); - return; - } + URI uri = new URI(location);
- reader = new DiskReader(location); + log.info("Initialized with location: " + location); + try { + reader = UrlReader.fromUri(uri, username, password); + } catch (MalformedURLException e) { + log.error("Could not determine a reader for the URI [" + uri + "]"); + throw e; + } }
/** @@ -176,22 +178,6 @@ public class RepoProvider implements ContentProvider, PackageSource { reader.validate(); }
- /** - * Trim white space and trailing (/) characters. - * - * @param path A url/directory path string. - * - * @return A trimmed string. - */ - private String trim(String path) { - path = path.trim(); - while ((path.length() > 1) && path.endsWith("/")) { - path = path.substring(0, path.length() - 1); - } - - return path; - } - public SyncProgressWeight getSyncProgressWeight() { return SyncProgressWeight.DEFAULT_WEIGHTS; } diff --git a/modules/enterprise/server/plugins/yum/src/main/java/org/rhq/enterprise/server/plugins/yum/UrlReader.java b/modules/enterprise/server/plugins/yum/src/main/java/org/rhq/enterprise/server/plugins/yum/UrlReader.java new file mode 100644 index 0000000..682b319 --- /dev/null +++ b/modules/enterprise/server/plugins/yum/src/main/java/org/rhq/enterprise/server/plugins/yum/UrlReader.java @@ -0,0 +1,111 @@ +/* + * RHQ Management Platform + * Copyright (C) 2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +package org.rhq.enterprise.server.plugins.yum; + +import java.io.IOException; +import java.io.InputStream; +import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.net.URLConnection; +import java.util.zip.GZIPInputStream; + +/** + * @author Lukas Krejci + * @since 4.9 + */ +public class UrlReader implements RepoReader { + + protected final URL baseUrl; + + public static UrlReader fromUri(URI uri, String username, String password) throws MalformedURLException { + if (uri.getScheme() == null) { + try { + return new DiskReader(new URI("file", uri.getSchemeSpecificPart(), uri.getFragment()).toURL()); + } catch (URISyntaxException e) { + throw new IllegalStateException( + "URI syntax exception while adding the 'file' scheme to a path. This should not have happened.", e); + } + } else if (uri.getScheme().equals("file")) { + return new DiskReader(uri.toURL()); + } else if (uri.getScheme().startsWith("http")) { + return new HttpReader(uri.toURL(), username, password); + } else { + return new UrlReader(uri.toURL()); + } + } + + protected UrlReader(URL baseUrl) { + this.baseUrl = baseUrl; + + } + + public void validate() throws IOException, URISyntaxException { + InputStream content = doOpen(baseUrl); + content.close(); + } + + /** + * Open an input stream to specifed relative url. Prepends the baseurl to the <i>url</i> and opens and opens and + * input stream. Files with a .gz suffix will be unziped (inline). + * + * @param path A path that is relative to the <i>baseurl</i> and references a file within the repo. + * + * @return An open input stream that <b>must</b> be closed by the caller. + * + * @throws IOException On all errors. 
+ */ + @Override + public final InputStream openStream(String path) throws IOException { + URL url = extendBaseUrl(path); + + InputStream ret = doOpen(url); + if (path.endsWith(".gz")) { + ret = new GZIPInputStream(ret); + } + + return ret; + } + + protected InputStream doOpen(URL url) throws IOException { + return url.openStream(); + } + + /** + * Mainly used for test purposes, othewise not really useful. + */ + public URL getBaseURL() { + return baseUrl; + } + + protected URL extendBaseUrl(String suffix) throws MalformedURLException { + if (suffix != null) { + suffix = suffix.trim(); + } + + return suffix == null ? baseUrl : new URL(baseUrl + "/" + suffix); + } + + @Override + public String toString() { + return getClass().getSimpleName() + " " + baseUrl; + } +} diff --git a/modules/enterprise/server/plugins/yum/src/main/resources/META-INF/rhq-serverplugin.xml b/modules/enterprise/server/plugins/yum/src/main/resources/META-INF/rhq-serverplugin.xml index b501235..2ced81c 100644 --- a/modules/enterprise/server/plugins/yum/src/main/resources/META-INF/rhq-serverplugin.xml +++ b/modules/enterprise/server/plugins/yum/src/main/resources/META-INF/rhq-serverplugin.xml @@ -24,7 +24,11 @@ type="string" required="true" description="The URL or path to the Yum repository" /> + <c:simple-property name="username" type="string" required="false" + description="The optional user name to authenticate with"/> + <c:simple-property name="password" type="password" required="false" + description="The optional password to authenticate with"/> </configuration> </contentSourceType>
-</content-plugin> \ No newline at end of file +</content-plugin> diff --git a/modules/enterprise/server/plugins/yum/src/test/java/Acme/Serve/UrlReaderTestServer.java b/modules/enterprise/server/plugins/yum/src/test/java/Acme/Serve/UrlReaderTestServer.java new file mode 100644 index 0000000..43f3d33 --- /dev/null +++ b/modules/enterprise/server/plugins/yum/src/test/java/Acme/Serve/UrlReaderTestServer.java @@ -0,0 +1,64 @@ +/* + * RHQ Management Platform + * Copyright (C) 2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +package Acme.Serve; + +import java.io.PrintStream; +import java.util.Map; +import java.util.Properties; + +/** + * This needs to be in the {@code Acme.Serve} package so that authentication realm can be defined. 
+ * + * @author Lukas Krejci + * @since 4.9 + */ +public class UrlReaderTestServer extends Serve { + private static final long serialVersionUID = 1L; + + + public static class AuthRealm extends BasicAuthRealm { + + private static final long serialVersionUID = 1L; + + public AuthRealm(String name) { + super(name); + } + } + + public UrlReaderTestServer(Map arguments, PrintStream logStream) { + super(arguments, logStream); + } + + @Override + public void setMappingTable(PathTreeDictionary mappingTable) { + super.setMappingTable(mappingTable); + } + + @Override + protected void initMime() { + mime = new Properties(); + mime.put("file", "text/plain"); + } + + @Override + public void setRealms(PathTreeDictionary realms) { + super.setRealms(realms); + } +} diff --git a/modules/enterprise/server/plugins/yum/src/test/java/org/rhq/enterprise/server/plugins/yum/UrlReaderTest.java b/modules/enterprise/server/plugins/yum/src/test/java/org/rhq/enterprise/server/plugins/yum/UrlReaderTest.java new file mode 100644 index 0000000..62e9c18 --- /dev/null +++ b/modules/enterprise/server/plugins/yum/src/test/java/org/rhq/enterprise/server/plugins/yum/UrlReaderTest.java @@ -0,0 +1,195 @@ +package org.rhq.enterprise.server.plugins.yum;/* + * RHQ Management Platform + * Copyright (C) 2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +import static org.testng.Assert.assertEquals; + +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.Reader; +import java.net.InetAddress; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.util.HashMap; +import java.util.Map; + +import javax.servlet.ServletException; +import javax.servlet.http.HttpServlet; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + +import org.testng.Assert; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; + +import org.rhq.core.util.stream.StreamUtil; +import org.rhq.test.PortScout; + +import Acme.Serve.Serve; +import Acme.Serve.UrlReaderTestServer; + +/** + * @author Lukas Krejci + * @since 4.9 + */ +@Test +public class UrlReaderTest { + + private static final String TEST_USER = "testUser"; + private static final String TEST_PASSWORD = "password"; + + private static class AuthServlet extends HttpServlet { + + private static final long serialVersionUID = 1L; + + @Override + protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException { + String authType = req.getAuthType(); + String remoteUser = req.getRemoteUser(); + + assertEquals(authType, "BASIC", "Unexpected authentication type"); + assertEquals(remoteUser, TEST_USER, "Unexpected authenticated user."); + + String path = req.getPathTranslated(); + if (path != null) { + FileInputStream in = new FileInputStream(path); + try { + StreamUtil.copy(in, resp.getOutputStream(), false); + } finally { + in.close(); + } + } + } + } + + private UrlReaderTestServer httpServer; + private String rootUrl; + + @BeforeClass + public void startWebServer() throws IOException, URISyntaxException { + PortScout portScout = new PortScout(); + int httpPort = portScout.getNextFreePort(); + + Map<String, Object> 
params = new HashMap<String, Object>(); + params.put(Serve.ARG_PORT, httpPort); + params.put(Serve.ARG_NOHUP, "nohup"); + + httpServer = new UrlReaderTestServer(params, System.err); + + Serve.PathTreeDictionary aliases = new Serve.PathTreeDictionary(); + File root = getRoot(); + aliases.put("/", root); + aliases.put("/*", root); + + httpServer.setMappingTable(aliases); + httpServer.addDefaultServlets(null); + + httpServer.addServlet("/auth", new AuthServlet()); + + UrlReaderTestServer.AuthRealm authRealm = new UrlReaderTestServer.AuthRealm("auth"); + authRealm.put(TEST_USER, TEST_PASSWORD); + + Serve.PathTreeDictionary realms = new Serve.PathTreeDictionary(); + realms.put("/auth", authRealm); + + httpServer.setRealms(realms); + portScout.close(); + + httpServer.runInBackground(); + + rootUrl = InetAddress.getLocalHost().getHostAddress() + ":" + httpPort; + } + + @AfterClass(alwaysRun = true) + public void stopWebServer() throws IOException { + httpServer.stopBackground(); + httpServer.destroyAllServlets(); + } + + public void picksCorrectImpl() throws Exception { + URI httpUrl = new URI("http://jboss.org/rhq"); + URI httpsUrl = new URI("https://jboss.org/rhq"); + URI noSchemeUrl = new URI("stairway/to/heaven"); + URI fileUrl = new URI("file:/over/the/rainbow"); + + UrlReader httpRdr = UrlReader.fromUri(httpUrl, null, null); + UrlReader httpsRdr = UrlReader.fromUri(httpsUrl, null, null); + UrlReader noSchemeRdr = UrlReader.fromUri(noSchemeUrl, null, null); + UrlReader fileRdr = UrlReader.fromUri(fileUrl, null, null); + + assertReader(httpRdr, httpUrl.toURL(), HttpReader.class); + assertReader(httpsRdr, httpsUrl.toURL(), HttpReader.class); + assertReader(noSchemeRdr, new URL("file:stairway/to/heaven"), DiskReader.class); + assertReader(fileRdr, fileUrl.toURL(), DiskReader.class); + } + + public void readsFiles() throws Exception { + UrlReader fileReader = UrlReader.fromUri(getRoot().toURI(), null, null); + + testReaderWithTestFile(fileReader); + } + + public void 
readsHttp() throws Exception { + URI uri = new URI("http://" + rootUrl); + + UrlReader httpReader = UrlReader.fromUri(uri, null, null); + + testReaderWithTestFile(httpReader); + } + + public void authenticatesInHttp() throws Exception { + URI uri = new URI("http://" + rootUrl + "/auth"); + + UrlReader httpReader = UrlReader.fromUri(uri, TEST_USER, TEST_PASSWORD); + + testReaderWithTestFile(httpReader); + } + + private static void assertReader(UrlReader instance, URL expectedUrl, Class<? extends UrlReader> expectedType) { + assertEquals(instance.getClass(), expectedType, "Unexpected reader type"); + assertEquals(instance.getBaseURL(), expectedUrl, "Unexpected baseUrl"); + } + + private void testReaderWithTestFile(UrlReader reader) throws IOException, URISyntaxException { + try { + reader.validate(); + } catch (IOException e) { + Assert.fail("Validation of " + reader.getClass().getSimpleName() + " reader failed", e); + } + + Reader rdr = new InputStreamReader(reader.openStream("test.file")); + try { + String contents = StreamUtil.slurp(rdr); + + assertEquals(contents, "kachny\n", "Unexpected contents of the test file"); + } finally { + rdr.close(); + } + } + + private File getRoot() throws URISyntaxException { + URI testUri = getClass().getResource("/test.file").toURI(); + + File testFile = new File(testUri.getSchemeSpecificPart()); + return testFile.getParentFile(); + } +} diff --git a/modules/enterprise/server/plugins/yum/src/test/resources/test.file b/modules/enterprise/server/plugins/yum/src/test/resources/test.file new file mode 100644 index 0000000..8742087 --- /dev/null +++ b/modules/enterprise/server/plugins/yum/src/test/resources/test.file @@ -0,0 +1 @@ +kachny
commit cece52f8f9a1c44269ae6e76ad73b0251da09cfe Author: Jirka Kremser jkremser@redhat.com Date: Mon Aug 19 14:38:10 2013 +0200
Fixing/handling errors in UI when storage node has no associated resource id (this can happen when installing everything with "rhqctl install --agent-auto-start false").
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java index d6a91cb..d1ea625 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java @@ -122,15 +122,17 @@ public class StorageNodeAdminView extends EnhancedVLayout implements Bookmarkabl GWTServiceLookup.getStorageService().findResourcesWithAlertDefinitions(new AsyncCallback<Integer[]>() { @Override public void onFailure(Throwable caught) { - Message message = new Message("foobar", - Message.Severity.Warning); + Message message = new Message("Unable to render storage node alert view: " + + caught.getMessage(), Message.Severity.Warning); CoreGUI.goToView(VIEW_ID.getName(), message); }
@Override public void onSuccess(Integer[] result) { if (result == null || result.length == 0) { - onFailure(new Exception("foobaz")); + onFailure(new Exception( + "Unfortunately, there are no associated resources for the available storage nodes. " + + "Check if the agents are running on the machines where the storage nodes are deployed.")); } else { resIds = ArrayUtils.unwrapArray(result); tabset.getTabByName(tabInfo.name.getName()).setPane( diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java index a89bb81..685fb5d 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java @@ -250,7 +250,8 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNodeLoadComposit } int value = from.getUnackAlerts(); record.setAttribute(FIELD_ALERTS.propertyName(), - StorageNodeAdminView.getAlertsString("New Alerts", node.getId(), value)); + node.getResource() != null ? 
StorageNodeAdminView.getAlertsString("New Alerts", node.getId(), value) + : "New Alerts (0)"); String memory = null; if (from.getHeapPercentageUsed() != null && from.getHeapPercentageUsed().getAggregate().getAvg() != null) memory = MeasurementConverterClient.format(from.getHeapPercentageUsed().getAggregate().getAvg(), from diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java index 6fdae0c..4055e6f 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDetailView.java @@ -23,7 +23,6 @@ import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDat import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_ALERTS; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_CQL_PORT; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_CTIME; -import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_FAILED_OPERATION; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_JMX_PORT; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_MTIME; import static org.rhq.enterprise.gui.coregui.client.admin.storage.StorageNodeDatasourceField.FIELD_OPERATION_MODE; @@ -67,7 +66,6 @@ import org.rhq.enterprise.gui.coregui.client.util.Log; import org.rhq.enterprise.gui.coregui.client.util.MeasurementUtility; import org.rhq.enterprise.gui.coregui.client.util.StringUtility; import 
org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedHLayout; -import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedUtility; import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout; import org.rhq.enterprise.gui.coregui.client.util.message.Message;
@@ -130,9 +128,11 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab final StorageNode node = storageNodes.get(0); header.setContents("<div style='text-align: center; font-weight: bold; font-size: medium;'> Storage Node (" + node.getAddress() + ")</div>"); - fetchStorageNodeConfigurationComposite(node); + prepareDetailsSection(node); + fetchStorageNodeConfigurationComposite(node); fetchSparkLineDataForLoadComponent(node); + fetchUnackAlerts(storageNodeId, node.getResource() != null); }
public void onFailure(Throwable caught) { @@ -142,45 +142,76 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab initSectionCount = SECTION_COUNT; } }); - fetchUnackAlerts(storageNodeId); }
private void fetchStorageNodeConfigurationComposite(final StorageNode node) { - GWTServiceLookup.getStorageService().retrieveConfiguration(node, - new AsyncCallback<StorageNodeConfigurationComposite>() { - @Override - public void onFailure(Throwable caught) { - Message message = new Message(MSG.view_configurationHistoryDetails_error_loadFailure(), - Message.Severity.Warning); - initSectionCount = SECTION_COUNT; - } + if (node.getResource() == null) { // no associated resource yet + LayoutSpacer spacer = new LayoutSpacer(); + spacer.setHeight(15); + HTMLFlow info = new HTMLFlow("<h2>There is no configuration available for this node. Is the agent running on the " + + node.getAddress() + "?</h2>"); + SectionStackSection section = new SectionStackSection("Configuration"); + section.setItems(spacer, info); + section.setExpanded(true); + section.setCanCollapse(false); + + configurationSection = section; + initSectionCount++; + } else { + GWTServiceLookup.getStorageService().retrieveConfiguration(node, + new AsyncCallback<StorageNodeConfigurationComposite>() { + @Override + public void onFailure(Throwable caught) { + Message message = new Message(MSG.view_configurationHistoryDetails_error_loadFailure(), + Message.Severity.Warning); + initSectionCount = SECTION_COUNT; + }
- @Override - public void onSuccess(StorageNodeConfigurationComposite result) { - prepareResourceConfigEditor(result); - } - }); + @Override + public void onSuccess(StorageNodeConfigurationComposite result) { + prepareResourceConfigEditor(result); + } + }); + } }
private void fetchSparkLineDataForLoadComponent(final StorageNode storageNode) { + if (storageNode.getResource() == null) { + HTMLFlow info = new HTMLFlow("<i>No load data available.</i>"); + info.setExtraSpace(5); + loadLayout = new EnhancedVLayout(); + loadLayout.setWidth100(); + LayoutSpacer spacer = new LayoutSpacer(); + spacer.setHeight(10); + HTMLFlow loadLabel = new HTMLFlow("Status"); + loadLabel.addStyleName("formTitle"); + loadLabel.setHoverWidth(300); + loadLayout.setMembers(spacer, loadLabel, info); + + if (detailsAndLoadLayout == null) { + detailsAndLoadLayout = new EnhancedHLayout(); + } + initSectionCount++; + } else { + GWTServiceLookup.getStorageService().findStorageNodeLoadDataForLast(storageNode, 8, + MeasurementUtility.UNIT_HOURS, 60, + new AsyncCallback<Map<String, List<MeasurementDataNumericHighLowComposite>>>() { + @Override + public void onFailure(Throwable caught) {
- GWTServiceLookup.getStorageService().findStorageNodeLoadDataForLast(storageNode, 8, MeasurementUtility.UNIT_HOURS, - 60, new AsyncCallback<Map<String, List<MeasurementDataNumericHighLowComposite>>>() { - @Override - public void onFailure(Throwable caught) { - - } + }
- @Override - public void onSuccess(Map<String, List<MeasurementDataNumericHighLowComposite>> result) { - prepareLoadSection(sectionStack, storageNode, result); - } + @Override + public void onSuccess(Map<String, List<MeasurementDataNumericHighLowComposite>> result) { + prepareLoadSection(sectionStack, storageNode, result); + }
- }); + }); + } }
- private void fetchUnackAlerts(final int storageNodeId) { + private void fetchUnackAlerts(final int storageNodeId, final boolean isResourceIdSet) { GWTServiceLookup.getStorageService().findNotAcknowledgedStorageNodeAlertsCounts(Arrays.asList(storageNodeId), new AsyncCallback<List<Integer>>() { @Override @@ -198,7 +229,7 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab } else { unackAlerts = result.get(0); if (alertsItem != null) { - alertsItem.setValue(StorageNodeAdminView.getAlertsString("New Alerts", storageNodeId, unackAlerts)); + alertsItem.setValue(isResourceIdSet ? StorageNodeAdminView.getAlertsString("New Alerts", storageNodeId, unackAlerts) : "New Alerts (0)"); } } } @@ -263,15 +294,16 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab
final StaticTextItem cqlPortItem = new StaticTextItem(FIELD_CQL_PORT.propertyName(), FIELD_CQL_PORT.title()); cqlPortItem.setValue(storageNode.getCqlPort()); - + final StaticTextItem jmxPortItem = new StaticTextItem(FIELD_JMX_PORT.propertyName(), FIELD_JMX_PORT.title()); jmxPortItem.setValue(storageNode.getJmxPort());
-// final StaticTextItem jmxConnectionUrlItem = new StaticTextItem("jmxConnectionUrl", -// MSG.view_adminTopology_storageNode_jmxConnectionUrl()); -// jmxConnectionUrlItem.setValue(storageNode.getJMXConnectionURL()); + // final StaticTextItem jmxConnectionUrlItem = new StaticTextItem("jmxConnectionUrl", + // MSG.view_adminTopology_storageNode_jmxConnectionUrl()); + // jmxConnectionUrlItem.setValue(storageNode.getJMXConnectionURL());
- final StaticTextItem operationModeItem = new StaticTextItem(FIELD_OPERATION_MODE.propertyName(), MSG.view_adminTopology_serverDetail_operationMode()); + final StaticTextItem operationModeItem = new StaticTextItem(FIELD_OPERATION_MODE.propertyName(), + MSG.view_adminTopology_serverDetail_operationMode()); operationModeItem.setValue(storageNode.getOperationMode());
// make clickable link to associated resource @@ -294,35 +326,52 @@ public class StorageNodeDetailView extends EnhancedVLayout implements Bookmarkab StaticTextItem lastUpdateItem = new StaticTextItem(FIELD_MTIME.propertyName(), FIELD_MTIME.title()); lastUpdateItem.setValue(TimestampCellFormatter.format(Long.valueOf(storageNode.getMtime()), TimestampCellFormatter.DATE_TIME_FORMAT_LONG)); - + alertsItem = new StaticTextItem(FIELD_ALERTS.propertyName(), FIELD_ALERTS.title()); - alertsItem.setPrompt("The number in brackets represents the number of unacknowledged alerts for this storage node."); + alertsItem + .setPrompt("The number in brackets represents the number of unacknowledged alerts for this storage node."); if (unackAlerts != -1) { alertsItem.setValue(StorageNodeAdminView.getAlertsString("New Alerts", storageNodeId, unackAlerts)); } - - StaticTextItem message = new StaticTextItem("message", "Note"); - message.setValue(storageNode.getErrorMessage() == null ? "Everything is ok" : storageNode.getErrorMessage()); - + + StaticTextItem messageItem = new StaticTextItem("message", "Note"); + StringBuffer message = new StringBuffer(); + boolean isOk = true; + if (storageNode.getResource() == null) { + message.append("Storage node has no associated resource.<br />"); + isOk = false; + } + if (storageNode.getErrorMessage() != null) { + message.append(storageNode.getErrorMessage()).append("<br />"); + isOk = false; + } + if (isOk) { + message.append("Everything is ok"); + } + messageItem.setValue(message); + StaticTextItem lastOperation = null; - boolean isOperationFailed = storageNode.getFailedOperation() != null && storageNode.getFailedOperation().getResource() != null; + boolean isOperationFailed = storageNode.getFailedOperation() != null + && storageNode.getFailedOperation().getResource() != null; if (isOperationFailed) { ResourceOperationHistory operationHistory = storageNode.getFailedOperation(); - String value = 
LinkManager.getSubsystemResourceOperationHistoryLink(operationHistory.getResource().getId(), operationHistory.getId()); -// String value = "#Resource/" + operationHistory.getResource().getId() + "/Operations/History/" + operationHistory.getId()); + String value = LinkManager.getSubsystemResourceOperationHistoryLink(operationHistory.getResource().getId(), + operationHistory.getId()); + // String value = "#Resource/" + operationHistory.getResource().getId() + "/Operations/History/" + operationHistory.getId()); lastOperation = new StaticTextItem("lastOp", "Operation"); - lastOperation.setValue(LinkManager.getHref(value, operationHistory.getOperationDefinition().getDisplayName())); + lastOperation.setValue(LinkManager.getHref(value, operationHistory.getOperationDefinition() + .getDisplayName())); } - - - + List<FormItem> formItems = new ArrayList<FormItem>(6); - formItems.addAll(Arrays.asList(nameItem, resourceItem,cqlPortItem, jmxPortItem/*, jmxConnectionUrlItem*/)); - if (!CoreGUI.isDebugMode()) formItems.add(operationModeItem); // debug mode fails if this item is added - formItems.addAll(Arrays.asList(installationDateItem, lastUpdateItem, alertsItem, message)); - if (isOperationFailed) formItems.add(lastOperation); - form.setItems(formItems.toArray(new FormItem[]{})); - + formItems.addAll(Arrays.asList(nameItem, resourceItem, cqlPortItem, jmxPortItem/*, jmxConnectionUrlItem*/)); + if (!CoreGUI.isDebugMode()) + formItems.add(operationModeItem); // debug mode fails if this item is added + formItems.addAll(Arrays.asList(installationDateItem, lastUpdateItem, alertsItem, messageItem)); + if (isOperationFailed) + formItems.add(lastOperation); + form.setItems(formItems.toArray(new FormItem[] {})); + detailsLayout = new EnhancedVLayout(); detailsLayout.setWidth(450); detailsLayout.addMember(form); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java 
b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java index efe56e3..84c1586 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeTableView.java @@ -27,7 +27,7 @@ import java.util.Arrays; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; -import org.rhq.enterprise.gui.coregui.client.util.Log; + import com.google.gwt.user.client.Timer; import com.google.gwt.user.client.rpc.AsyncCallback; import com.smartgwt.client.data.Criteria; @@ -50,6 +50,7 @@ import org.rhq.enterprise.gui.coregui.client.components.table.AuthorizedTableAct import org.rhq.enterprise.gui.coregui.client.components.table.TableActionEnablement; import org.rhq.enterprise.gui.coregui.client.components.table.TableSection; import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; +import org.rhq.enterprise.gui.coregui.client.util.Log; import org.rhq.enterprise.gui.coregui.client.util.StringUtility; import org.rhq.enterprise.gui.coregui.client.util.async.Command; import org.rhq.enterprise.gui.coregui.client.util.async.CountDownLatch; @@ -83,6 +84,7 @@ public class StorageNodeTableView extends TableSection<StorageNodeDatasource> { @Override protected void doOnDraw() { super.doOnDraw(); + // commenting out this call, because it caused UI to freeze // scheduleUnacknowledgedAlertsPollingJob(getListGrid()); }
@@ -178,6 +180,11 @@ public class StorageNodeTableView extends TableSection<StorageNodeDatasource> { ListGrid listGrid = new ListGrid() { @Override protected Canvas getExpansionComponent(final ListGridRecord record) { + if (record.getAttribute(FIELD_RESOURCE_ID.propertyName()) == null) { + // no resource set + return new HTMLFlow("There is no load data available for this node. Is the agent running on the " + + record.getAttributeAsString(FIELD_ADDRESS.propertyName() + "?")); + } int id = record.getAttributeAsInt(FIELD_ID); return new StorageNodeLoadComponent(id, null); } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 04abeb88..55593d3 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -469,10 +469,15 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN long endTime = System.currentTimeMillis(); long beginTime = endTime - (8 * 60 * 60 * 1000); for (StorageNode node : nodes) { - StorageNodeLoadComposite composite = getLoad(subjectManager.getOverlord(), node, beginTime, endTime); - int unackAlerts = findNotAcknowledgedStorageNodeAlerts(subjectManager.getOverlord(), node).size(); - composite.setUnackAlerts(unackAlerts); - result.add(composite); + if (node.getOperationMode() != OperationMode.INSTALLED) { + StorageNodeLoadComposite composite = getLoad(subjectManager.getOverlord(), node, beginTime, endTime); + int unackAlerts = findNotAcknowledgedStorageNodeAlerts(subjectManager.getOverlord(), node).size(); + composite.setUnackAlerts(unackAlerts); + result.add(composite); + } else { // newly installed node + result.add(new StorageNodeLoadComposite(node, beginTime, endTime)); + } + } 
return result; }
commit 144e622efb8ec61816b4716f6f696e2330e3f4e9 Author: Stefan Negrea snegrea@redhat.com Date: Sat Aug 17 19:25:18 2013 -0500
[BZ 993513] Baselines entries are no longer calculated and inserted into the SQL database if there is no data in the storage.
The sql query that returns the schedules that need recomputation has been updated to return only the ids of the schedules, reducing the amount of data requested from SQL to the minimum.
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/measurement/MeasurementBaseline.java b/modules/core/domain/src/main/java/org/rhq/core/domain/measurement/MeasurementBaseline.java index 5bd0e8b..681914d 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/measurement/MeasurementBaseline.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/measurement/MeasurementBaseline.java @@ -94,6 +94,8 @@ public class MeasurementBaseline implements Serializable { @OneToOne(fetch = FetchType.LAZY, optional = false) private MeasurementSchedule schedule;
+ @Column(name = "SCHEDULE_ID", insertable = false, updatable = false) + private int scheduleId;
public MeasurementBaseline() { computeTime = System.currentTimeMillis(); @@ -176,6 +178,20 @@ public class MeasurementBaseline implements Serializable { }
/** + * @return the scheduleId + */ + public int getScheduleId() { + return scheduleId; + } + + /** + * @param scheduleId the scheduleId to set + */ + public void setScheduleId(int scheduleId) { + this.scheduleId = scheduleId; + } + + /** * If <code>true</code>, it means a user manually entered the baseline values, as opposed to having them * automatically be calculated by examining past measurement data. * diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/measurement/MeasurementBaselineManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/measurement/MeasurementBaselineManagerBean.java index 90ac6e7..ed8db3a 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/measurement/MeasurementBaselineManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/measurement/MeasurementBaselineManagerBean.java @@ -18,10 +18,9 @@ */ package org.rhq.enterprise.server.measurement;
+import java.util.ArrayList; import java.util.Arrays; import java.util.Date; -import java.util.Iterator; -import java.util.LinkedList; import java.util.List; import java.util.Properties;
@@ -48,10 +47,10 @@ import org.rhq.enterprise.server.RHQConstants; import org.rhq.enterprise.server.auth.SubjectManagerLocal; import org.rhq.enterprise.server.authz.AuthorizationManagerLocal; import org.rhq.enterprise.server.authz.PermissionException; -import org.rhq.enterprise.server.storage.StorageClientManagerBean; import org.rhq.enterprise.server.cloud.StatusManagerLocal; import org.rhq.enterprise.server.measurement.instrumentation.MeasurementMonitor; import org.rhq.enterprise.server.resource.ResourceManagerLocal; +import org.rhq.enterprise.server.storage.StorageClientManagerBean; import org.rhq.enterprise.server.system.SystemManagerLocal; import org.rhq.server.metrics.MetricsBaselineCalculator;
@@ -148,8 +147,6 @@ public class MeasurementBaselineManagerBean implements MeasurementBaselineManage + (System.currentTimeMillis() - now) + ")ms");
now = System.currentTimeMillis(); - int totalProcessed = 0; - while (true) { /* * each call is done in a separate xtn of at most 100K inserted rows; this helps to keep the xtn * shorter to avoid timeouts in scenarios where baseline calculations bunch together. the idea was that @@ -189,16 +186,22 @@ public class MeasurementBaselineManagerBean implements MeasurementBaselineManage * In any event, an appropriate chunking solution needs to be found, and that partitioning strategy * needs to replace the limits in the query today. */ - List<MeasurementSchedule> schedulesWithoutBaselines = - measurementBaselineManager.getSchedulesWithoutBaselines(); - measurementBaselineManager.calculateBaselines(schedulesWithoutBaselines, now, amountOfData); - totalProcessed += schedulesWithoutBaselines.size(); - - if (schedulesWithoutBaselines.size() < BASELINE_PROCESSING_LIMIT) { - break; + List<Integer> schedulesWithoutBaselines = measurementBaselineManager.getSchedulesWithoutBaselines(); + + List<Integer> accumulator = new ArrayList<Integer>(); + for (Integer value : schedulesWithoutBaselines) { + accumulator.add(value); + if (accumulator.size() == BASELINE_PROCESSING_LIMIT) { + measurementBaselineManager.calculateBaselines(accumulator, now, amountOfData); + accumulator.clear(); } } - log.info("Calculated and inserted [" + totalProcessed + "] new baselines. (" + if (!accumulator.isEmpty()) { + measurementBaselineManager.calculateBaselines(accumulator, now, amountOfData); + accumulator.clear(); + } + + log.info("Calculated and inserted [" + schedulesWithoutBaselines.size() + "] new baselines. (" + (System.currentTimeMillis() - now) + ")ms");
MeasurementMonitor.getMBean().incrementBaselineCalculationTime(System.currentTimeMillis() - computeTime); @@ -222,18 +225,17 @@ public class MeasurementBaselineManagerBean implements MeasurementBaselineManage
@SuppressWarnings("unchecked") @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) - public List<MeasurementSchedule> getSchedulesWithoutBaselines() { - String sql = - "SELECT s.* FROM rhq_measurement_sched s INNER JOIN rhq_measurement_def d ON s.definition = d.id " + - "LEFT JOIN rhq_measurement_bline b ON s.id = b.schedule_id WHERE b.schedule_id IS NULL AND d.numeric_type = 0"; - Query query = this.entityManager.createNativeQuery(sql, MeasurementSchedule.class); - query.setMaxResults(BASELINE_PROCESSING_LIMIT); + public List<Integer> getSchedulesWithoutBaselines() { + final String sql = + "SELECT s.id FROM rhq_measurement_sched s INNER JOIN rhq_measurement_def d ON s.definition = d.id " + + "LEFT JOIN rhq_measurement_bline b ON s.id = b.schedule_id WHERE s.enabled = true AND b.schedule_id IS NULL AND d.numeric_type = 0"; + Query query = this.entityManager.createNativeQuery(sql);
return query.getResultList(); }
@TransactionAttribute(TransactionAttributeType.NOT_SUPPORTED) - public void calculateBaselines(List<MeasurementSchedule> schedules, long olderThan, long amountOfData) { + public void calculateBaselines(List<Integer> schedules, long olderThan, long amountOfData) { long endTime = olderThan; long startTime = endTime - amountOfData;
@@ -242,30 +244,21 @@ public class MeasurementBaselineManagerBean implements MeasurementBaselineManage long calcStartTime = System.currentTimeMillis(); List<MeasurementBaseline> results = baselineCalculator.calculateBaselines(schedules, startTime, endTime); long calcEndTime = System.currentTimeMillis(); - int count = results.size();
if (log.isDebugEnabled()) { - log.debug("Finished computing " + count + " new baselines in " + (calcEndTime - calcStartTime) + " ms"); + log.debug("Finished computing " + results.size() + " new baselines in " + (calcEndTime - calcStartTime) + + " ms"); }
log.debug("Persisting baselines calculations"); long saveStartTime = System.currentTimeMillis(); - Iterator<MeasurementBaseline> iterator = results.iterator(); - List<MeasurementBaseline> queue = new LinkedList<MeasurementBaseline>(); - while (iterator.hasNext()) { - if (queue.size() == 10) { - measurementBaselineManager.saveNewBaselines(queue); - queue = new LinkedList<MeasurementBaseline>(); - } - queue.add(iterator.next()); - } - if (!queue.isEmpty()) { - measurementBaselineManager.saveNewBaselines(queue); - } + + measurementBaselineManager.saveNewBaselines(results);
long saveEndTime = System.currentTimeMillis(); if (log.isDebugEnabled()) { - log.debug("Finished persisting " + count + " baselines in " + (saveEndTime - saveStartTime) + " ms"); + log.debug("Finished persisting " + results.size() + " baselines in " + (saveEndTime - saveStartTime) + + " ms"); } }
@@ -273,7 +266,10 @@ public class MeasurementBaselineManagerBean implements MeasurementBaselineManage @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) public void saveNewBaselines(List<MeasurementBaseline> baselines) { for (MeasurementBaseline baseline : baselines) { - entityManager.merge(baseline); + MeasurementSchedule schedule = new MeasurementSchedule(); + schedule.setId(baseline.getScheduleId()); + baseline.setSchedule(schedule); + this.entityManager.merge(baseline); } }
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/measurement/MeasurementBaselineManagerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/measurement/MeasurementBaselineManagerLocal.java index af6145a..35b8099 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/measurement/MeasurementBaselineManagerLocal.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/measurement/MeasurementBaselineManagerLocal.java @@ -24,7 +24,6 @@ import javax.ejb.Local;
import org.rhq.core.domain.auth.Subject; import org.rhq.core.domain.measurement.MeasurementBaseline; -import org.rhq.core.domain.measurement.MeasurementSchedule; import org.rhq.core.domain.resource.Resource;
/** @@ -73,7 +72,7 @@ public interface MeasurementBaselineManagerLocal { * will be called repeatedly during baseline calculations to get all of the necessary * schedules. */ - List<MeasurementSchedule> getSchedulesWithoutBaselines(); + List<Integer> getSchedulesWithoutBaselines();
/** * Given a list of schedules, this method calculates and stores baselines using the @@ -87,7 +86,7 @@ public interface MeasurementBaselineManagerLocal { * is treated as a duration. For example, a value of 259200000 * would be treated as 3 days. */ - void calculateBaselines(List<MeasurementSchedule> schedules, long olderThan, long amountOfData); + void calculateBaselines(List<Integer> schedules, long olderThan, long amountOfData);
/** * Persists the newly calculated baselines. @@ -148,7 +147,7 @@ public interface MeasurementBaselineManagerLocal { /** * Return a list of {@link MeasurementBaseline} objects for the {@link Resource} represented by the given id. * - * @param subject the user request to view the baseline history for the given resource + * @param subject the user request to view the baseline history for the given resource * @param resourceId the id of the resource whose baselines are to be returned * * @return a list of baselines for all measurements scheduled on the given resource diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsBaselineCalculator.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsBaselineCalculator.java index 6242df6..ef7d092 100644 --- a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsBaselineCalculator.java +++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsBaselineCalculator.java @@ -31,7 +31,6 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory;
import org.rhq.core.domain.measurement.MeasurementBaseline; -import org.rhq.core.domain.measurement.MeasurementSchedule; import org.rhq.server.metrics.domain.AggregateSimpleNumericMetric; import org.rhq.server.metrics.domain.AggregateType;
@@ -48,12 +47,12 @@ public class MetricsBaselineCalculator { this.metricsDAO = metricsDAO; }
- public List<MeasurementBaseline> calculateBaselines(List<MeasurementSchedule> schedules, long startTime, + public List<MeasurementBaseline> calculateBaselines(List<Integer> schedules, long startTime, long endTime) { List<MeasurementBaseline> calculatedBaselines = new ArrayList<MeasurementBaseline>();
MeasurementBaseline measurementBaseline; - for (MeasurementSchedule schedule : schedules) { + for (Integer schedule : schedules) { measurementBaseline = this.calculateBaseline(schedule, startTime, endTime); if (measurementBaseline != null) { calculatedBaselines.add(measurementBaseline); @@ -63,9 +62,9 @@ public class MetricsBaselineCalculator { return calculatedBaselines; }
- private MeasurementBaseline calculateBaseline(MeasurementSchedule schedule, long startTime, long endTime) { - Iterable<AggregateSimpleNumericMetric> metrics = this.metricsDAO.findAggregatedSimpleOneHourMetric( - schedule.getId(), startTime, endTime); + private MeasurementBaseline calculateBaseline(Integer schedule, long startTime, long endTime) { + Iterable<AggregateSimpleNumericMetric> metrics = this.metricsDAO.findAggregatedSimpleOneHourMetric(schedule, + startTime, endTime);
if (metrics != null && metrics.iterator() != null && metrics.iterator().hasNext()) { ArithmeticMeanCalculator mean = new ArithmeticMeanCalculator(); @@ -104,7 +103,7 @@ public class MetricsBaselineCalculator { baseline.setMax(max); baseline.setMin(min); baseline.setMean(mean.getArithmeticMean()); - baseline.setSchedule(schedule); + baseline.setScheduleId(schedule);
if (log.isDebugEnabled()) { log.debug("Calculated baseline: " + baseline.toString()); @@ -113,12 +112,6 @@ public class MetricsBaselineCalculator { return baseline; }
- MeasurementBaseline baseline = new MeasurementBaseline(); - baseline.setMax(Double.NaN); - baseline.setMin(Double.NaN); - baseline.setMean(Double.NaN); - baseline.setSchedule(schedule); - - return baseline; + return null; } } diff --git a/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsBaselineCalculatorTest.java b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsBaselineCalculatorTest.java index 2b58222..1137dca 100644 --- a/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsBaselineCalculatorTest.java +++ b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsBaselineCalculatorTest.java @@ -27,7 +27,6 @@ import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.verifyNoMoreInteractions; import static org.mockito.Mockito.when; -import static org.rhq.test.AssertUtils.assertPropertiesMatch; import static org.testng.Assert.assertEquals;
import java.util.ArrayList; @@ -44,7 +43,6 @@ import org.testng.annotations.ObjectFactory; import org.testng.annotations.Test;
import org.rhq.core.domain.measurement.MeasurementBaseline; -import org.rhq.core.domain.measurement.MeasurementSchedule; import org.rhq.server.metrics.domain.AggregateSimpleNumericMetric; import org.rhq.server.metrics.domain.AggregateType;
@@ -66,7 +64,6 @@ public class MetricsBaselineCalculatorTest {
@Test public void noCalculationTest() throws Exception { - //tell the method story as it happens: mock dependencies and configure //those dependencies to get the method under test to completion. StorageSession mockSession = mock(StorageSession.class); @@ -74,34 +71,24 @@ public class MetricsBaselineCalculatorTest { PowerMockito.whenNew(MetricsDAO.class).withParameterTypes(StorageSession.class, MetricsConfiguration.class) .withArguments(eq(mockSession), eq(metricsConfiguration)).thenReturn(mockMetricsDAO);
- when(mockMetricsDAO.findAggregatedSimpleOneHourMetric(eq(1), eq(0), - eq(1))).thenReturn(new ArrayList<AggregateSimpleNumericMetric>()); + when(mockMetricsDAO.findAggregatedSimpleOneHourMetric(eq(1), eq(0), eq(1))).thenReturn( + new ArrayList<AggregateSimpleNumericMetric>());
- MeasurementSchedule mockSchedule = mock(MeasurementSchedule.class); - when(mockSchedule.getId()).thenReturn(0); + int expectedScheduleId = 2567;
//create object to test and inject required dependencies MetricsBaselineCalculator objectUnderTest = new MetricsBaselineCalculator(new MetricsDAO(mockSession, metricsConfiguration));
//run code under test - List<MeasurementBaseline> result = objectUnderTest.calculateBaselines(Arrays.asList(mockSchedule), 0, 1); + List<MeasurementBaseline> result = objectUnderTest.calculateBaselines(Arrays.asList(expectedScheduleId), 0, 1);
//verify the results (Assert and mock verification) - assertEquals(result.size(), 1, "Expected to get back one baseline"); - MeasurementBaseline expected = new MeasurementBaseline(); - expected.setSchedule(mockSchedule); - expected.setMax(Double.NaN); - expected.setMin(Double.NaN); - expected.setMean(Double.NaN); - - assertPropertiesMatch("", expected, result.get(0), "computeTime"); + assertEquals(result.size(), 0, "No baselines expected");
verify(mockMetricsDAO, times(1)).findAggregatedSimpleOneHourMetric(any(Integer.class), any(Integer.class), any(Integer.class)); verifyNoMoreInteractions(mockMetricsDAO); - - verify(mockSchedule, times(1)).getId(); }
@Test @@ -138,7 +125,7 @@ public class MetricsBaselineCalculatorTest { } }
- int expectedScheduleId= 567; + int expectedScheduleId = 1567; long expectedStartTime = 135; long expectedEndTime = 246; long beforeComputeTime = System.currentTimeMillis(); @@ -154,15 +141,12 @@ public class MetricsBaselineCalculatorTest { mockMetricsDAO.findAggregatedSimpleOneHourMetric(eq(expectedScheduleId), eq(expectedStartTime), eq(expectedEndTime))).thenReturn(randomData);
- MeasurementSchedule mockSchedule = mock(MeasurementSchedule.class); - when(mockSchedule.getId()).thenReturn(expectedScheduleId); - //create object to test and inject required dependencies MetricsBaselineCalculator objectUnderTest = new MetricsBaselineCalculator(new MetricsDAO(mockSession, metricsConfiguration));
//run code under test - List<MeasurementBaseline> result = objectUnderTest.calculateBaselines(Arrays.asList(mockSchedule), + List<MeasurementBaseline> result = objectUnderTest.calculateBaselines(Arrays.asList(expectedScheduleId), expectedStartTime, expectedEndTime);
//verify the results (Assert and mock verification) @@ -172,7 +156,7 @@ public class MetricsBaselineCalculatorTest { Assert.assertEquals(baselineResult.getMean(), average, TEST_PRECISION); Assert.assertEquals(baselineResult.getMax(), expectedMax, TEST_PRECISION); Assert.assertEquals(baselineResult.getMin(), expectedMin, TEST_PRECISION); - Assert.assertEquals(baselineResult.getSchedule(), mockSchedule); + Assert.assertEquals(baselineResult.getScheduleId(), expectedScheduleId); if (baselineResult.getComputeTime().getTime() > System.currentTimeMillis()) { Assert.fail("Back compute time, the computation was forward dated."); } @@ -183,10 +167,6 @@ public class MetricsBaselineCalculatorTest { verify(mockMetricsDAO, times(1)).findAggregatedSimpleOneHourMetric(eq(expectedScheduleId), eq(expectedStartTime), eq(expectedEndTime)); verifyNoMoreInteractions(mockMetricsDAO); - - verify(mockSchedule, times(2)).getId(); - verify(mockSchedule, times(1)).setBaseline(eq(baselineResult)); - verifyNoMoreInteractions(mockSchedule); }
@Test @@ -221,15 +201,12 @@ public class MetricsBaselineCalculatorTest { mockMetricsDAO.findAggregatedSimpleOneHourMetric(eq(expectedScheduleId), eq(expectedStartTime), eq(expectedEndTime))).thenReturn(randomData);
- MeasurementSchedule mockSchedule = mock(MeasurementSchedule.class); - when(mockSchedule.getId()).thenReturn(expectedScheduleId); - //create object to test and inject required dependencies MetricsBaselineCalculator objectUnderTest = new MetricsBaselineCalculator(new MetricsDAO(mockSession, metricsConfiguration));
//run code under test - List<MeasurementBaseline> result = objectUnderTest.calculateBaselines(Arrays.asList(mockSchedule), + List<MeasurementBaseline> result = objectUnderTest.calculateBaselines(Arrays.asList(expectedScheduleId), expectedStartTime, expectedEndTime);
//verify the results (Assert and mock verification) @@ -239,15 +216,10 @@ public class MetricsBaselineCalculatorTest { Assert.assertEquals(baselineResult.getMean(), average, TEST_PRECISION); Assert.assertEquals(baselineResult.getMax(), expectedMinMax); Assert.assertEquals(baselineResult.getMin(), expectedMinMax); - Assert.assertEquals(baselineResult.getSchedule(), mockSchedule); + Assert.assertEquals(baselineResult.getScheduleId(), expectedScheduleId);
verify(mockMetricsDAO, times(1)).findAggregatedSimpleOneHourMetric(eq(expectedScheduleId), eq(expectedStartTime), eq(expectedEndTime)); verifyNoMoreInteractions(mockMetricsDAO); - - verify(mockSchedule, times(2)).getId(); - verify(mockSchedule, times(1)).setBaseline(eq(baselineResult)); - verifyNoMoreInteractions(mockSchedule); } - }
commit f469c776f4b896d526402ebb47e3924acf811a74 Author: John Sanda jsanda@redhat.com Date: Sat Aug 17 18:47:37 2013 -0400
update status property for undeployment operation modes
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java index 4d4596e..e0f278d 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java @@ -254,10 +254,17 @@ public class StorageNode implements Serializable { return Status.DOWN; } } + if (operationMode == OperationMode.DECOMMISSION || operationMode == OperationMode.UNANNOUNCE || + operationMode == OperationMode.REMOVE_MAINTENANCE || operationMode == OperationMode.UNINSTALL) { + if (errorMessage == null && failedOperation == null) { + return Status.LEAVING; + } else { + return Status.DOWN; + } + } if (operationMode == OperationMode.NORMAL) { return Status.NORMAL; } - // else operation mode is DOWN return Status.DOWN; }
commit 28ad34e1525450501ae0a835927dc62bfdcd4527 Author: John Sanda jsanda@redhat.com Date: Sat Aug 17 15:17:37 2013 -0400
add support for undeploying a node that is in a failed deployment state
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index fcf4526..04abeb88 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -250,9 +250,12 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN reset(); storageNodeOperationsHandler.performAddNodeMaintenance(subject, storageNode); default: - // For any other operation mode, the storage node should already be part of - // the cluster. - // TODO Make sure that the storage node is in fact part of the cluster + // TODO what do we do with/about maintenance mode? + + // We do not want to deploy a node that is in the process of being + // undeployed. It is too hard to make sure we are not in an inconsistent state. + // Instead finish the undeployment and redeploy the storage node. + throw new RuntimeException("Cannot deploy " + storageNode); } }
@@ -264,6 +267,12 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN reset(); storageNodeOperationsHandler.uninstall(subject, storageNode); break; + case ANNOUNCE: + case BOOTSTRAP: + reset(); + storageNodeOperationsHandler.unannounceStorageNode(subject, storageNode); + break; + case ADD_NODE_MAINTENANCE: case NORMAL: case DECOMMISSION: reset(); @@ -281,7 +290,8 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN storageNodeOperationsHandler.uninstall(subject, storageNode); break; default: - + // TODO what do we do with/about maintenance mode + throw new RuntimeException("Cannot undeploy " + storageNode); } }
commit d7b53fd524ac99dda87101dbb11858c7649ed5de Author: John Sanda jsanda@redhat.com Date: Sat Aug 17 14:36:04 2013 -0400
add a check to see if the node is already decommissioned
If C* has already been decommissioned, i.e., its operation mode is DECOMMISSIONED, then trying the decommission operation again will result in C* throwing an exception which we could interpret as a failed operation. This makes the resource operation idempotent.
diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java index a82992e..7455f5e 100644 --- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java +++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java @@ -273,10 +273,16 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper try { EmsConnection emsConnection = getEmsConnection(); EmsBean storageService = emsConnection.getBean("org.apache.cassandra.db:type=StorageService"); - Class<?>[] emptyParams = new Class<?>[0];
- EmsOperation operation = storageService.getOperation("decommission", emptyParams); - operation.invoke((Object[]) emptyParams); + EmsAttribute operationModeAttr = storageService.getAttribute("OperationMode"); + String operationMode = (String) operationModeAttr.refresh(); + if (operationMode.equals("DECOMMISSIONED")) { + log.info("The storage node at " + getResourceContext().getResourceKey() + " is already decommissioned."); + } else { + Class<?>[] emptyParams = new Class<?>[0]; + EmsOperation operation = storageService.getOperation("decommission", emptyParams); + operation.invoke((Object[]) emptyParams); + } } catch (EmsInvocationException e) { result.setErrorMessage("Decommission operation failed: " + ThrowableUtil.getAllMessages(e)); }
commit 531e3d7784563107fa9ce3feb4b59c23980a668c Author: John Sanda jsanda@redhat.com Date: Sat Aug 17 14:00:27 2013 -0400
add support for resuming a failed undeployment
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 341cbc7..fcf4526 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -265,9 +265,13 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN storageNodeOperationsHandler.uninstall(subject, storageNode); break; case NORMAL: + case DECOMMISSION: reset(); storageNodeOperationsHandler.decommissionStorageNode(subject, storageNode); break; + case REMOVE_MAINTENANCE: + reset(); + storageNodeOperationsHandler.performRemoveNodeMaintenance(subject, storageNode); case UNANNOUNCE: reset(); storageNodeOperationsHandler.unannounceStorageNode(subject, storageNode); diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java index a30cfa7..f2fc108 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java @@ -78,9 +78,6 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa @EJB private ResourceManagerLocal resourceManager;
- @EJB - private ResourceFactoryManagerLocal resourceFactoryManager; - @Override public void announceStorageNode(Subject subject, StorageNode storageNode) { if (log.isInfoEnabled()) { @@ -150,19 +147,19 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa
storageNode.setOperationMode(StorageNode.OperationMode.UNINSTALL);
- ResourceOperationSchedule schedule = new ResourceOperationSchedule(); - schedule.setResource(storageNode.getResource()); - schedule.setJobTrigger(JobTrigger.createNowTrigger()); - schedule.setSubject(subject); - schedule.setOperationName("uninstall"); - Configuration parameters = new Configuration(); - schedule.setParameters(parameters); - - operationManager.scheduleResourceOperation(subject, schedule); - -// Resource resource = storageNode.getResource(); -// storageNodeOperationsHandler.detachFromResource(storageNode); -// storageNodeOperationsHandler.deleteStorageNodeResource(subject, resource); + if (storageNode.getResource() == null) { + finishUninstall(subject, storageNode); + } else { + ResourceOperationSchedule schedule = new ResourceOperationSchedule(); + schedule.setResource(storageNode.getResource()); + schedule.setJobTrigger(JobTrigger.createNowTrigger()); + schedule.setSubject(subject); + schedule.setOperationName("uninstall"); + Configuration parameters = new Configuration(); + schedule.setParameters(parameters); + + operationManager.scheduleResourceOperation(subject, schedule); + } }
@Override @@ -173,13 +170,6 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa }
@Override - @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) - public void deleteStorageNodeResource(Subject subject, Resource resource) { - log.info("Preparing to delete storage node resource " + resource); - resourceFactoryManager.deleteResource(subject, resource.getId()); - } - - @Override public void decommissionStorageNode(Subject subject, StorageNode storageNode) { log.info("Preparing to decommission " + storageNode);
@@ -272,20 +262,25 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa
if (storageNode.getOperationMode() == StorageNode.OperationMode.DECOMMISSION) { storageNode.setOperationMode(StorageNode.OperationMode.REMOVE_MAINTENANCE); - List<StorageNode> clusterNodes = entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, - StorageNode.class).setParameter("operationMode", StorageNode.OperationMode.NORMAL) - .getResultList(); - for (StorageNode node : clusterNodes) { - node.setMaintenancePending(true); - } - boolean runRepair = storageNode.isMaintenancePending(); - performRemoveNodeMaintenance(subjectManager.getOverlord(), clusterNodes.get(0), runRepair, - createPropertyListOfAddresses(SEEDS_LIST, clusterNodes)); + performRemoveNodeMaintenance(subjectManager.getOverlord(), storageNode); } else { log.info("Remove node maintenance has already been run for " + storageNode); } }
+ @Override + public void performRemoveNodeMaintenance(Subject subject, StorageNode storageNode) { + List<StorageNode> clusterNodes = entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, + StorageNode.class).setParameter("operationMode", StorageNode.OperationMode.NORMAL) + .getResultList(); + for (StorageNode node : clusterNodes) { + node.setMaintenancePending(true); + } + boolean runRepair = storageNode.isMaintenancePending(); + performRemoveNodeMaintenance(subjectManager.getOverlord(), clusterNodes.get(0), runRepair, + createPropertyListOfAddresses(SEEDS_LIST, clusterNodes)); + } + private void performRemoveNodeMaintenance(Subject subject, StorageNode storageNode, boolean runRepair, PropertyList seedsList) { if (log.isInfoEnabled()) { @@ -583,16 +578,18 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa break; default: // SUCCESS log.info("Successfully uninstalled " + storageNode + " from disk"); - Resource resource = storageNode.getResource(); - - log.info("Remove storage node resource " + resource + " from inventory"); - - storageNodeOperationsHandler.detachFromResource(storageNode); - resourceManager.uninventoryResource(getSubject(operationHistory), resource.getId()); + uninstall(getSubject(operationHistory), storageNode); + } + }
- log.info("Removing storage node entity " + storageNode + " from database"); - entityManager.remove(storageNode); + private void finishUninstall(Subject subject, StorageNode storageNode) { + if (storageNode.getResource() != null) { + log.info("Removing storage node resource " + storageNode.getResource() + " from inventory"); + storageNodeOperationsHandler.detachFromResource(storageNode); + resourceManager.uninventoryResource(subject, storageNode.getResource().getId()); } + log.info("Removing storage node entity " + storageNode + " from database"); + entityManager.remove(storageNode); }
private Subject getSubject(ResourceOperationHistory resourceOperationHistory) { diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerLocal.java index 7ed2c4d..5d08dd8 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerLocal.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerLocal.java @@ -8,7 +8,6 @@ import org.rhq.core.domain.auth.Subject; import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.domain.operation.OperationHistory; import org.rhq.core.domain.operation.ResourceOperationHistory; -import org.rhq.core.domain.resource.Resource;
/** * @author John Sanda @@ -46,11 +45,11 @@ public interface StorageNodeOperationsHandlerLocal {
void detachFromResource(StorageNode storageNode);
- void deleteStorageNodeResource(Subject subject, Resource resource); - void decommissionStorageNode(Subject subject, StorageNode storageNode);
void performRemoveNodeMaintenanceIfNecessary(InetAddress storageNodeAddress);
+ void performRemoveNodeMaintenance(Subject subject, StorageNode storageNode); + void logError(StorageNode.OperationMode newStorageNodeOperationMode, String error, Exception e); }
commit 7e25cb04e6935259fac714fa5b507c6b2d23d51b Author: John Sanda jsanda@redhat.com Date: Sat Aug 17 12:06:03 2013 -0400
initial support for undeploying a storage node
Undeploying a storage node involves,
* removing the node from the cluster * updating existing nodes cassandra.yaml to no longer reference the node * updating the internode auth conf file for existing nodes * shutting down the node and purging its files from disk * removing the node's resource from inventory * deleting the StorageNode entity
There is still a good bit of work left to do to handle various cases like going back to 1 node and kicking off the undeployment in all the various states.
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java index 48d5f83..4d4596e 100644 --- a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java @@ -24,6 +24,7 @@ package org.rhq.core.domain.cloud;
import java.io.Serializable;
+import javax.persistence.CascadeType; import javax.persistence.Column; import javax.persistence.Entity; import javax.persistence.EnumType; @@ -146,7 +147,7 @@ public class StorageNode implements Serializable { private Resource resource;
@JoinColumn(name = "RESOURCE_OP_HIST_ID", referencedColumnName = "ID", nullable = true) - @OneToOne(optional = true) + @OneToOne(optional = true, cascade = {CascadeType.REMOVE}) private ResourceOperationHistory failedOperation;
// required for JPA @@ -261,17 +262,22 @@ public class StorageNode implements Serializable { }
public enum OperationMode { - + DECOMMISSION("Remove the storage node from service"), DOWN("This storage node is down"), // INSTALLED("This storage node is newly installed but not yet operational"), // MAINTENANCE("This storage node is in maintenance mode"), // NORMAL("This storage node is running normally"), ANNOUNCE("The storage node is installed but not yet part of the cluster. It is being announced so that it " + "can join the cluster."), + UNANNOUNCE("The storage node has been decommissioned and the cluster is being notified to stop accepting " + + "gossip from its IP address."), BOOTSTRAP("The storage is installed but not yet part of the cluster. It is getting bootstrapped into the " + "cluster"), ADD_NODE_MAINTENANCE("The storage node is running and is preparing to undergo routine maintenance that is " + - "necessary when a new node joins the cluster."); + "necessary when a new node joins the cluster."), + REMOVE_MAINTENANCE("The storage node is no longer part of the cluster. Remaining storage node are " + + "undergoing cluster maintenance due to the topology change."), + UNINSTALL("The storage node is being removed from inventory and its bits on disk are getting purged.");
public final String message;
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 5cce984..341cbc7 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -256,6 +256,31 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN } }
+ @Override + public void undeployStorageNode(Subject subject, StorageNode storageNode) { + storageNode = entityManager.find(StorageNode.class, storageNode.getId()); + switch (storageNode.getOperationMode()) { + case INSTALLED: + reset(); + storageNodeOperationsHandler.uninstall(subject, storageNode); + break; + case NORMAL: + reset(); + storageNodeOperationsHandler.decommissionStorageNode(subject, storageNode); + break; + case UNANNOUNCE: + reset(); + storageNodeOperationsHandler.unannounceStorageNode(subject, storageNode); + break; + case UNINSTALL: + reset(); + storageNodeOperationsHandler.uninstall(subject, storageNode); + break; + default: + + } + } + private void reset() { for (StorageNode storageNode : getStorageNodes()) { storageNode.setErrorMessage(null); diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java index 75a795c..58a06a7 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java @@ -170,4 +170,6 @@ public interface StorageNodeManagerLocal { StorageNode createStorageNode(Resource resource);
void deployStorageNode(Subject subject, StorageNode storageNode); + + void undeployStorageNode(Subject subject, StorageNode storageNode); } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerRemote.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerRemote.java index 2255299..7ffb2a6 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerRemote.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerRemote.java @@ -98,4 +98,6 @@ public interface StorageNodeManagerRemote { PageList<Alert> findAllStorageNodeAlerts(Subject subject, StorageNode storageNode);
void deployStorageNode(Subject sbubject, StorageNode storageNode); + + void undeployStorageNode(Subject subject, StorageNode storageNode); } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterMonitor.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterMonitor.java index 7db95fb..bd2efbe 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterMonitor.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterMonitor.java @@ -41,6 +41,8 @@ public class StorageClusterMonitor implements StorageStateListener { @Override public void onStorageNodeRemoved(InetAddress address) { log.info("Storage node at " + address.getHostAddress() + " has been removed from the cluster"); + StorageNodeOperationsHandlerLocal storageNodeOperationsHandler = LookupUtil.getStorageNodeOperationsHandler(); + storageNodeOperationsHandler.performRemoveNodeMaintenanceIfNecessary(address); }
@Override diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java index 4969c46..a30cfa7 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java @@ -2,7 +2,6 @@ package org.rhq.enterprise.server.storage;
import java.net.InetAddress; import java.util.ArrayList; -import java.util.LinkedList; import java.util.List;
import javax.ejb.Asynchronous; @@ -35,6 +34,8 @@ import org.rhq.enterprise.server.auth.SessionManager; import org.rhq.enterprise.server.auth.SubjectManagerLocal; import org.rhq.enterprise.server.cloud.StorageNodeManagerLocal; import org.rhq.enterprise.server.operation.OperationManagerLocal; +import org.rhq.enterprise.server.resource.ResourceFactoryManagerLocal; +import org.rhq.enterprise.server.resource.ResourceManagerLocal; import org.rhq.server.metrics.StorageSession;
/** @@ -74,6 +75,12 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa @EJB private StorageNodeOperationsHandlerLocal storageNodeOperationsHandler;
+ @EJB + private ResourceManagerLocal resourceManager; + + @EJB + private ResourceFactoryManagerLocal resourceFactoryManager; + @Override public void announceStorageNode(Subject subject, StorageNode storageNode) { if (log.isInfoEnabled()) { @@ -103,7 +110,33 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa schedule.setResource(clusterNode.getResource()); schedule.setJobTrigger(JobTrigger.createNowTrigger()); schedule.setSubject(subject); - schedule.setOperationName("updateKnownNodes"); + schedule.setOperationName("announce"); + Configuration parameters = new Configuration(); + parameters.put(addresses); + schedule.setParameters(parameters); + + operationManager.scheduleResourceOperation(subject, schedule); + } + + @Override + public void unannounceStorageNode(Subject subject, StorageNode storageNode) { + log.info("Unannouncing " + storageNode); + + storageNode.setOperationMode(StorageNode.OperationMode.UNANNOUNCE); + List<StorageNode> clusterNodes = entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, + StorageNode.class).setParameter("operationMode", StorageNode.OperationMode.NORMAL).getResultList(); + for (StorageNode clusterNode : clusterNodes) { + clusterNode.setMaintenancePending(true); + } + unannounceStorageNode(subject, clusterNodes.get(0), createPropertyListOfAddresses("addresses", clusterNodes)); + } + + private void unannounceStorageNode(Subject subject, StorageNode clusterNode, PropertyList addresses) { + ResourceOperationSchedule schedule = new ResourceOperationSchedule(); + schedule.setResource(clusterNode.getResource()); + schedule.setJobTrigger(JobTrigger.createNowTrigger()); + schedule.setSubject(subject); + schedule.setOperationName("unannounce"); Configuration parameters = new Configuration(); parameters.put(addresses); schedule.setParameters(parameters); @@ -112,10 +145,69 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa }
@Override + public void uninstall(Subject subject, StorageNode storageNode) { + log.info("Uninstalling " + storageNode); + + storageNode.setOperationMode(StorageNode.OperationMode.UNINSTALL); + + ResourceOperationSchedule schedule = new ResourceOperationSchedule(); + schedule.setResource(storageNode.getResource()); + schedule.setJobTrigger(JobTrigger.createNowTrigger()); + schedule.setSubject(subject); + schedule.setOperationName("uninstall"); + Configuration parameters = new Configuration(); + schedule.setParameters(parameters); + + operationManager.scheduleResourceOperation(subject, schedule); + +// Resource resource = storageNode.getResource(); +// storageNodeOperationsHandler.detachFromResource(storageNode); +// storageNodeOperationsHandler.deleteStorageNodeResource(subject, resource); + } + + @Override + @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) + public void detachFromResource(StorageNode storageNode) { + storageNode.setResource(null); + storageNode.setFailedOperation(null); + } + + @Override + @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) + public void deleteStorageNodeResource(Subject subject, Resource resource) { + log.info("Preparing to delete storage node resource " + resource); + resourceFactoryManager.deleteResource(subject, resource.getId()); + } + + @Override + public void decommissionStorageNode(Subject subject, StorageNode storageNode) { + log.info("Preparing to decommission " + storageNode); + + storageNode.setOperationMode(StorageNode.OperationMode.DECOMMISSION); + List<StorageNode> storageNodes = entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, + StorageNode.class).setParameter("operationMode", StorageNode.OperationMode.NORMAL).getResultList(); + storageNodes.add(storageNode); + + boolean runRepair = updateSchemaIfNecessary(storageNodes); + // This is a bit of a hack since the maintenancePending flag is really intended to + // queue up storage nodes during cluster maintenance operations. 
+ storageNode.setMaintenancePending(runRepair); + + ResourceOperationSchedule schedule = new ResourceOperationSchedule(); + schedule.setOperationName("decommission"); + schedule.setResource(storageNode.getResource()); + schedule.setJobTrigger(JobTrigger.createNowTrigger()); + schedule.setSubject(subject); + schedule.setParameters(new Configuration()); + + operationManager.scheduleResourceOperation(subject, schedule); + } + + @Override @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) public void logError(StorageNode.OperationMode newStorageNodeOperationMode, String error, Exception e) { try { - StorageNode newStorageNode = findNewStorgeNode(newStorageNodeOperationMode); + StorageNode newStorageNode = findStorageNodeByMode(newStorageNodeOperationMode); newStorageNode.setErrorMessage(error + " Check the server log for details. Root cause: " + ThrowableUtil.getRootCause(e).getMessage()); } catch (Exception e1) { @@ -157,8 +249,6 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa log.info("Running addNodeMaintenance for storage node " + storageNode); }
- Subject overlord = subjectManager.getOverlord(); - ResourceOperationSchedule schedule = new ResourceOperationSchedule(); schedule.setResource(storageNode.getResource()); schedule.setJobTrigger(JobTrigger.createNowTrigger()); @@ -172,7 +262,50 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa
schedule.setParameters(config);
- operationManager.scheduleResourceOperation(overlord, schedule); + operationManager.scheduleResourceOperation(subject, schedule); + } + + @Override + public void performRemoveNodeMaintenanceIfNecessary(InetAddress storageNodeAddress) { + StorageNode storageNode = entityManager.createNamedQuery(StorageNode.QUERY_FIND_BY_ADDRESS, + StorageNode.class).setParameter("address", storageNodeAddress.getHostAddress()).getSingleResult(); + + if (storageNode.getOperationMode() == StorageNode.OperationMode.DECOMMISSION) { + storageNode.setOperationMode(StorageNode.OperationMode.REMOVE_MAINTENANCE); + List<StorageNode> clusterNodes = entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, + StorageNode.class).setParameter("operationMode", StorageNode.OperationMode.NORMAL) + .getResultList(); + for (StorageNode node : clusterNodes) { + node.setMaintenancePending(true); + } + boolean runRepair = storageNode.isMaintenancePending(); + performRemoveNodeMaintenance(subjectManager.getOverlord(), clusterNodes.get(0), runRepair, + createPropertyListOfAddresses(SEEDS_LIST, clusterNodes)); + } else { + log.info("Remove node maintenance has already been run for " + storageNode); + } + } + + private void performRemoveNodeMaintenance(Subject subject, StorageNode storageNode, boolean runRepair, + PropertyList seedsList) { + if (log.isInfoEnabled()) { + log.info("Running remove node maintenance for storage node " + storageNode); + } + + ResourceOperationSchedule schedule = new ResourceOperationSchedule(); + schedule.setResource(storageNode.getResource()); + schedule.setJobTrigger(JobTrigger.createNowTrigger()); + schedule.setSubject(subject); + schedule.setOperationName("removeNodeMaintenance"); + + Configuration config = new Configuration(); + config.put(seedsList); + config.put(new PropertySimple(RUN_REPAIR_PROPERTY, runRepair)); + config.put(new PropertySimple(UPDATE_SEEDS_LIST, Boolean.TRUE)); + + schedule.setParameters(config); + + 
operationManager.scheduleResourceOperation(subject, schedule); }
@Override @@ -187,9 +320,9 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa return; }
- if (resourceOperationHistory.getOperationDefinition().getName().equals("updateKnownNodes")) { + if (resourceOperationHistory.getOperationDefinition().getName().equals("announce")) { try { - storageNodeOperationsHandler.handleUpdateKnownNodes(resourceOperationHistory); + storageNodeOperationsHandler.handleAnnounce(resourceOperationHistory); } catch (Exception e) { String msg = "Aborting storage node deployment due to unexpected error while announcing cluster nodes."; log.error(msg, e); @@ -212,13 +345,44 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa log.error(msg, e); storageNodeOperationsHandler.logError(StorageNode.OperationMode.ADD_NODE_MAINTENANCE, msg, e); } + } else if (operationHistory.getOperationDefinition().getName().equals("decommission")) { + try { + storageNodeOperationsHandler.handleDecommission(resourceOperationHistory); + } catch (Exception e) { + String msg = "Aborting undeployment due to unexpected error while decommissioning storage node."; + log.error(msg, e); + storageNodeOperationsHandler.logError(StorageNode.OperationMode.DECOMMISSION, msg, e); + } + } else if (operationHistory.getOperationDefinition().getName().equals("removeNodeMaintenance")) { + try { + storageNodeOperationsHandler.handleRemoveNodeMaintenance(resourceOperationHistory); + } catch (Exception e) { + String msg = "Aborting undeployment due to unexpected error while performing remove node maintenance."; + log.error(msg, e); + storageNodeOperationsHandler.logError(StorageNode.OperationMode.REMOVE_MAINTENANCE, msg, e); + } + } else if (operationHistory.getOperationDefinition().getName().equals("unannounce")) { + try { + storageNodeOperationsHandler.handleUnannounce(resourceOperationHistory); + } catch (Exception e) { + String msg = "Aborting undeployment due to unexpected error while performing unannouncement."; + log.error(msg, e); + storageNodeOperationsHandler.logError(StorageNode.OperationMode.UNANNOUNCE, msg, e); + } + } else if 
(operationHistory.getOperationDefinition().getName().equals("uninstall")) { + try { + storageNodeOperationsHandler.handleUninstall(resourceOperationHistory); + } catch (Exception e) { + String msg = "Aborting undeployment due to unexpected error while uninstalling."; + log.error(msg, e); + storageNodeOperationsHandler.logError(StorageNode.OperationMode.UNINSTALL, msg, e); + } } - }
@Override @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) - public void handleUpdateKnownNodes(ResourceOperationHistory resourceOperationHistory) { + public void handleAnnounce(ResourceOperationHistory resourceOperationHistory) { StorageNode storageNode = findStorageNode(resourceOperationHistory.getResource()); StorageNode newStorageNode = null; switch (resourceOperationHistory.getStatus()) { @@ -226,11 +390,11 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa // nothing to do here return; case CANCELED: - newStorageNode = findNewStorgeNode(StorageNode.OperationMode.ANNOUNCE); - operationCanceled(storageNode, resourceOperationHistory, newStorageNode); + newStorageNode = findStorageNodeByMode(StorageNode.OperationMode.ANNOUNCE); + deploymentOperationCanceled(storageNode, resourceOperationHistory, newStorageNode); case FAILURE: - newStorageNode = findNewStorgeNode(StorageNode.OperationMode.ANNOUNCE); - operationFailed(storageNode, resourceOperationHistory, newStorageNode); + newStorageNode = findStorageNodeByMode(StorageNode.OperationMode.ANNOUNCE); + deploymentOperationFailed(storageNode, resourceOperationHistory, newStorageNode); return; default: // SUCCESS storageNode.setMaintenancePending(false); @@ -238,7 +402,7 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa PropertyList addresses = parameters.getList("addresses"); StorageNode nextNode = takeFromMaintenanceQueue();
- newStorageNode = findNewStorgeNode(StorageNode.OperationMode.ANNOUNCE); + newStorageNode = findStorageNodeByMode(StorageNode.OperationMode.ANNOUNCE); Subject subject = getSubject(resourceOperationHistory);
if (nextNode == null) { @@ -253,6 +417,41 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa
@Override @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) + public void handleUnannounce(ResourceOperationHistory operationHistory) { + StorageNode storageNode = findStorageNode(operationHistory.getResource()); + StorageNode removedStorageNode = null; + switch (operationHistory.getStatus()) { + case INPROGRESS: + // nothing to do here + break; + case CANCELED: + removedStorageNode = findStorageNodeByMode(StorageNode.OperationMode.UNANNOUNCE); + undeploymentOperationCanceled(storageNode, operationHistory, removedStorageNode); + break; + case FAILURE: + removedStorageNode = findStorageNodeByMode(StorageNode.OperationMode.UNANNOUNCE); + deploymentOperationFailed(storageNode, operationHistory, removedStorageNode); + break; + default: // SUCCESS + storageNode.setMaintenancePending(false); + + removedStorageNode = findStorageNodeByMode(StorageNode.OperationMode.UNANNOUNCE); + StorageNode nextNode = takeFromMaintenanceQueue(); + Subject subject = getSubject(operationHistory); + Configuration params = operationHistory.getParameters(); + PropertyList addresses = params.getList("addresses"); + + if (nextNode == null) { + log.info("Successfully unannounced " + removedStorageNode + " to storage cluster"); + uninstall(getSubject(operationHistory), removedStorageNode); + } else { + unannounceStorageNode(subject, nextNode, addresses.deepCopy(false)); + } + } + } + + @Override + @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) public void handlePrepareForBootstrap(ResourceOperationHistory resourceOperationHistory) { StorageNode newStorageNode = findStorageNode(resourceOperationHistory.getResource()); switch (resourceOperationHistory.getStatus()) { @@ -264,10 +463,10 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa // If the operation is canceled the plugin will get an InterruptedException. 
// The actual bootstrapping may very well complete so we need to add in some // checks to find out if the node is up and part of the cluster. - operationCanceled(newStorageNode, resourceOperationHistory); + deploymentOperationCanceled(newStorageNode, resourceOperationHistory); return; case FAILURE: - operationFailed(newStorageNode, resourceOperationHistory); + deploymentOperationFailed(newStorageNode, resourceOperationHistory); return; default: // SUCCESS // Nothing to do because we wait for the C* driver to notify us that the @@ -285,24 +484,22 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa // nothing to do here return; case CANCELED: - newStorageNode = findNewStorgeNode(StorageNode.OperationMode.ADD_NODE_MAINTENANCE); - operationCanceled(storageNode, resourceOperationHistory, newStorageNode); + newStorageNode = findStorageNodeByMode(StorageNode.OperationMode.ADD_NODE_MAINTENANCE); + deploymentOperationCanceled(storageNode, resourceOperationHistory, newStorageNode); return; case FAILURE: - newStorageNode = findNewStorgeNode(StorageNode.OperationMode.ADD_NODE_MAINTENANCE); - operationFailed(storageNode, resourceOperationHistory, newStorageNode); + newStorageNode = findStorageNodeByMode(StorageNode.OperationMode.ADD_NODE_MAINTENANCE); + deploymentOperationFailed(storageNode, resourceOperationHistory, newStorageNode); return; default: // SUCCESS - if (log.isInfoEnabled()) { - log.info("Finished cluster maintenance for " + storageNode + " for addition of new node"); - } + log.info("Finished running add node maintenance for " + storageNode); storageNode.setMaintenancePending(false); StorageNode nextNode = takeFromMaintenanceQueue();
if (nextNode == null) { - log.info("Finished running cluster maintenance for addition of new node"); + log.info("Finished running add node maintenance on all cluster nodes"); // TODO replace this with an UPDATE statement - newStorageNode = findNewStorgeNode(StorageNode.OperationMode.ADD_NODE_MAINTENANCE); + newStorageNode = findStorageNodeByMode(StorageNode.OperationMode.ADD_NODE_MAINTENANCE); newStorageNode.setOperationMode(StorageNode.OperationMode.NORMAL); } else { Configuration parameters = resourceOperationHistory.getParameters(); @@ -314,56 +511,176 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa } }
+ @Override + @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) + public void handleRemoveNodeMaintenance(ResourceOperationHistory operationHistory) { + StorageNode storageNode = findStorageNode(operationHistory.getResource()); + StorageNode removedStorageNode = null; + switch (operationHistory.getStatus()) { + case INPROGRESS: + // nothing to do here + break; + case CANCELED: + removedStorageNode = findStorageNodeByMode(StorageNode.OperationMode.REMOVE_MAINTENANCE); + undeploymentOperationCanceled(storageNode, operationHistory, removedStorageNode); + break; + case FAILURE: + removedStorageNode = findStorageNodeByMode(StorageNode.OperationMode.REMOVE_MAINTENANCE); + undeploymentOperationFailed(storageNode, operationHistory, removedStorageNode); + break; + default: // SUCCESS + log.info("Finished remove node maintenance for " + storageNode); + storageNode.setMaintenancePending(false); + StorageNode nextNode = takeFromMaintenanceQueue(); + + if (nextNode == null) { + log.info("Finished running remove node maintenance on all cluster nodes"); + // TODO replace this with an UPDATE statement + removedStorageNode = findStorageNodeByMode(StorageNode.OperationMode.REMOVE_MAINTENANCE); + unannounceStorageNode(getSubject(operationHistory), removedStorageNode); + } else { + Configuration parameters = operationHistory.getParameters(); + boolean runRepair = parameters.getSimple(RUN_REPAIR_PROPERTY).getBooleanValue(); + PropertyList seedsList = parameters.getList(SEEDS_LIST).deepCopy(false); + Subject subject = getSubject(operationHistory); + performRemoveNodeMaintenance(subject, nextNode, runRepair, seedsList); + } + } + } + + @Override + @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) + public void handleDecommission(ResourceOperationHistory operationHistory) { + StorageNode storageNode = findStorageNode(operationHistory.getResource()); + switch (operationHistory.getStatus()) { + case INPROGRESS: + // nothing do to here + break; + case CANCELED: + 
undeploymentOperationCanceled(storageNode, operationHistory); + break; + case FAILURE: + undeploymentOperationFailed(storageNode, operationHistory); + break; + default: // SUCCESS + log.info("Successfully decommissioned " + storageNode); + } + } + + @Override + @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) + public void handleUninstall(ResourceOperationHistory operationHistory) { + StorageNode storageNode = findStorageNode(operationHistory.getResource()); + switch (operationHistory.getStatus()) { + case INPROGRESS: + // nothing to do here + break; + case CANCELED: + undeploymentOperationCanceled(storageNode, operationHistory); + break; + case FAILURE: + undeploymentOperationFailed(storageNode, operationHistory); + break; + default: // SUCCESS + log.info("Successfully uninstalled " + storageNode + " from disk"); + Resource resource = storageNode.getResource(); + + log.info("Remove storage node resource " + resource + " from inventory"); + + storageNodeOperationsHandler.detachFromResource(storageNode); + resourceManager.uninventoryResource(getSubject(operationHistory), resource.getId()); + + log.info("Removing storage node entity " + storageNode + " from database"); + entityManager.remove(storageNode); + } + } + private Subject getSubject(ResourceOperationHistory resourceOperationHistory) { Subject subject = subjectManager.getSubjectByName(resourceOperationHistory.getSubjectName()); return SessionManager.getInstance().put(subject); }
- private void operationCanceled(StorageNode storageNode, ResourceOperationHistory operationHistory, + private void deploymentOperationCanceled(StorageNode storageNode, ResourceOperationHistory operationHistory, StorageNode newStorageNode) { - log.error("Deployment has been aborted due to canceled operation [" + + operationCanceled(storageNode, operationHistory, newStorageNode, "Deployment"); + } + + private void undeploymentOperationCanceled(StorageNode storageNode, ResourceOperationHistory operationHistory, + StorageNode removedStorageNode) { + operationCanceled(storageNode, operationHistory, removedStorageNode, "Undeployment"); + } + + private void operationCanceled(StorageNode storageNode, ResourceOperationHistory operationHistory, + StorageNode movingNode, String opType) { + log.error(opType + " has been aborted due to canceled operation [" + operationHistory.getOperationDefinition().getDisplayName() + " on " + storageNode.getResource() + ": " + operationHistory.getErrorMessage());
- newStorageNode.setErrorMessage("Deployment has been aborted due to canceled resource operation on " + + movingNode.setErrorMessage(opType + " has been aborted due to canceled resource operation on " + storageNode.getAddress()); - storageNode.setErrorMessage("Deployment of " + newStorageNode.getAddress() + " has been aborted due " + + storageNode.setErrorMessage(opType + " of " + movingNode.getAddress() + " has been aborted due " + "to cancellation of resource operation [" + operationHistory.getOperationDefinition().getDisplayName() + "]."); storageNode.setFailedOperation(operationHistory); }
- private void operationCanceled(StorageNode newStorageNode, ResourceOperationHistory operationHistory) { - log.error("Deployment has been aborted due to canceled operation [" + - operationHistory.getOperationDefinition().getDisplayName() + " on " + newStorageNode.getResource() + + private void deploymentOperationCanceled(StorageNode newStorageNode, ResourceOperationHistory operationHistory) { + operationCanceled(newStorageNode, operationHistory, "Deployment"); + } + + private void undeploymentOperationCanceled(StorageNode storageNode, ResourceOperationHistory operationHistory) { + operationCanceled(storageNode, operationHistory, "Undeployment"); + } + + private void operationCanceled(StorageNode storageNode, ResourceOperationHistory operationHistory, String opType) { + log.error(opType + " has been aborted due to canceled operation [" + + operationHistory.getOperationDefinition().getDisplayName() + " on " + storageNode.getResource() + ": " + operationHistory.getErrorMessage());
- newStorageNode.setErrorMessage("Deployment has been aborted due to canceled resource operation [" + + storageNode.setErrorMessage(opType + " has been aborted due to canceled resource operation [" + operationHistory.getOperationDefinition().getDisplayName() + "]."); - newStorageNode.setFailedOperation(operationHistory); + storageNode.setFailedOperation(operationHistory); }
- private void operationFailed(StorageNode storageNode, ResourceOperationHistory operationHistory, + private void deploymentOperationFailed(StorageNode storageNode, ResourceOperationHistory operationHistory, StorageNode newStorageNode) { - log.error("Deployment has been aborted due to failed operation [" + - operationHistory.getOperationDefinition().getDisplayName() + "] on " + storageNode.getResource() + - ": " + operationHistory.getErrorMessage()); + operationFailed(storageNode, operationHistory, newStorageNode, "Deployment"); + }
- newStorageNode.setErrorMessage("Deployment has been aborted due to failed resource operation on " + - storageNode.getAddress()); - storageNode.setErrorMessage("Deployment of " + newStorageNode.getAddress() + " has been aborted due " + - "to failed resource operation [" + operationHistory.getOperationDefinition().getDisplayName() + "]."); - storageNode.setFailedOperation(operationHistory); + private void undeploymentOperationFailed(StorageNode storageNode, ResourceOperationHistory operationHistory, + StorageNode removedNode) { + operationFailed(storageNode, operationHistory, removedNode, "Undeployment"); + } + + private void deploymentOperationFailed(StorageNode storageNode, ResourceOperationHistory operationHistory) { + operationFailed(storageNode, operationHistory, "Deployment"); + } + + private void undeploymentOperationFailed(StorageNode storageNode, ResourceOperationHistory operationHistory) { + operationFailed(storageNode, operationHistory, "Undeployment"); }
- private void operationFailed(StorageNode newStorageNode, ResourceOperationHistory operationHistory) { - log.error("Deployment has been aborted due to failed operation [" + - operationHistory.getOperationDefinition().getDisplayName() + "] on " + newStorageNode.getResource() + + private void operationFailed(StorageNode storageNode, ResourceOperationHistory operationHistory, String opType) { + log.error(opType + " has been aborted due to failed operation [" + + operationHistory.getOperationDefinition().getDisplayName() + "] on " + storageNode.getResource() + ": " + operationHistory.getErrorMessage());
- newStorageNode.setErrorMessage("Deployment has been aborted due to failed resource operation [" + + storageNode.setErrorMessage(opType + " has been aborted due to failed resource operation [" + operationHistory.getOperationDefinition().getDisplayName() + "]."); - newStorageNode.setFailedOperation(operationHistory); + storageNode.setFailedOperation(operationHistory); + } + + private void operationFailed(StorageNode storageNode, ResourceOperationHistory operationHistory, + StorageNode movingNode, String opType) { + log.error(opType + " has been aborted due to failed operation [" + + operationHistory.getOperationDefinition().getDisplayName() + "] on " + storageNode.getResource() + + ": " + operationHistory.getErrorMessage()); + + movingNode.setErrorMessage(opType + " has been aborted due to failed resource operation on " + + storageNode.getAddress()); + storageNode.setErrorMessage(opType + " of " + movingNode.getAddress() + " has been aborted due " + + "to failed resource operation [" + operationHistory.getOperationDefinition().getDisplayName() + "]."); + storageNode.setFailedOperation(operationHistory); }
private StorageNode findStorageNode(Resource resource) { @@ -417,7 +734,7 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa return storageNodes.get(0); }
- private StorageNode findNewStorgeNode(StorageNode.OperationMode operationMode) { + private StorageNode findStorageNodeByMode(StorageNode.OperationMode operationMode) { return entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, StorageNode.class) .setParameter("operationMode", operationMode).getSingleResult(); } diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerLocal.java index 83b0ce5..7ed2c4d 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerLocal.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerLocal.java @@ -8,6 +8,7 @@ import org.rhq.core.domain.auth.Subject; import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.domain.operation.OperationHistory; import org.rhq.core.domain.operation.ResourceOperationHistory; +import org.rhq.core.domain.resource.Resource;
/** * @author John Sanda @@ -17,19 +18,39 @@ public interface StorageNodeOperationsHandlerLocal { @Asynchronous void handleOperationUpdateIfNecessary(OperationHistory operationHistory);
- void handleUpdateKnownNodes(ResourceOperationHistory operationHistory); + void handleAnnounce(ResourceOperationHistory operationHistory); + + void handleUnannounce(ResourceOperationHistory operationHistory);
void handlePrepareForBootstrap(ResourceOperationHistory operationHistory);
void handleAddNodeMaintenance(ResourceOperationHistory operationHistory);
+ void handleRemoveNodeMaintenance(ResourceOperationHistory operationHistory); + + void handleDecommission(ResourceOperationHistory operationHistory); + + void handleUninstall(ResourceOperationHistory operationHistory); + void announceStorageNode(Subject subject, StorageNode storageNode);
+ void unannounceStorageNode(Subject subject, StorageNode storageNode); + void bootstrapStorageNode(Subject subject, StorageNode storageNode);
void performAddNodeMaintenanceIfNecessary(InetAddress storageNodeAddress);
void performAddNodeMaintenance(Subject subject, StorageNode storageNode);
+ void uninstall(Subject subject, StorageNode storageNode); + + void detachFromResource(StorageNode storageNode); + + void deleteStorageNodeResource(Subject subject, Resource resource); + + void decommissionStorageNode(Subject subject, StorageNode storageNode); + + void performRemoveNodeMaintenanceIfNecessary(InetAddress storageNodeAddress); + void logError(StorageNode.OperationMode newStorageNodeOperationMode, String error, Exception e); } diff --git a/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java b/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java index 5692bea..1bf9683 100644 --- a/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java +++ b/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java @@ -67,7 +67,8 @@ public class AlertDefinitionServerPluginComponent implements ServerPluginCompone private static final String FREE_DISK_TO_DATA_SIZE_RATIO_METRIC_NAME = "Calculated.FreeDiskToDataSizeRatio"; private static final String TAKE_SNAPSHOT_OPERATION_NAME = "takeSnapshot"; private static final String[] MAINTENANCE_OPERATIONS = new String[] { "readRepair", "addNodeMaintenance", - "updateKnownNodes", "prepareForBootstrap", "prepareForUpgrade", "updateSeedsList", "updateConfiguration" }; + "removeNodeMaintenance", "announce", "unannounce", "prepareForBootstrap", "prepareForUpgrade", + "updateSeedsList", "updateConfiguration" };
static private final List<InjectedTemplate> injectedTemplates; static private final InjectedTemplate storageNodeHighHeapTemplate;
commit a48d538f936301f849b1092666e7f9e04221d274 Author: John Sanda jsanda@redhat.com Date: Sat Aug 17 11:57:38 2013 -0400
Implement storage node uninstall as resource operation
The uninstall task involves purging the storage node bits from disk and removing it from inventory. I previously implemented this by implementing the DeleteResourceFacet. This was problematic though because the server side logic for the deploy/undeploy work flows is centered around resource operations.
diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java index 64d672d..a82992e 100644 --- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java +++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java @@ -55,7 +55,6 @@ import org.rhq.core.domain.configuration.PropertyMap; import org.rhq.core.domain.configuration.PropertySimple; import org.rhq.core.pluginapi.configuration.ConfigurationFacet; import org.rhq.core.pluginapi.configuration.ConfigurationUpdateReport; -import org.rhq.core.pluginapi.inventory.DeleteResourceFacet; import org.rhq.core.pluginapi.inventory.ProcessScanResult; import org.rhq.core.pluginapi.inventory.ResourceContext; import org.rhq.core.pluginapi.operation.OperationFacet; @@ -71,8 +70,7 @@ import org.rhq.plugins.cassandra.util.KeyspaceService; /** * @author John Sanda */ -public class StorageNodeComponent extends CassandraNodeComponent implements OperationFacet, ConfigurationFacet, - DeleteResourceFacet { +public class StorageNodeComponent extends CassandraNodeComponent implements OperationFacet, ConfigurationFacet {
private Log log = LogFactory.getLog(StorageNodeComponent.class);
@@ -91,29 +89,6 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper configDelegate.updateResourceConfiguration(configurationUpdateReport); }
- @Override - public void deleteResource() throws Exception { - OperationResult shutdownResult = shutdownIfNecessary(); - if (shutdownResult.getErrorMessage() != null) { - throw new Exception("Cannot delete storage node [resourceKey: " + getResourceContext().getResourceKey() + - "]: " + shutdownResult.getErrorMessage()); - } - - log.info("Purging data directories"); - Configuration pluginConfig = getResourceContext().getPluginConfiguration(); - String yamlProp = pluginConfig.getSimpleValue("yamlConfiguration"); - File yamlFile = new File(yamlProp); - ConfigEditor yamlEditor = new ConfigEditor(yamlFile); - yamlEditor.load(); - purgeDataDirs(yamlEditor); - - File basedir = getBasedir(); - log.info("Purging installation directory " + basedir); - purgeDir(basedir); - - log.info("Finished deleting storage node " + getResourceContext().getResourceKey()); - } - private OperationResult shutdownIfNecessary() { log.info("Shutting down " + getResourceContext().getResourceKey());
@@ -159,14 +134,18 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper return readRepair(); } else if (name.equals("updateConfiguration")) { return updateConfiguration(parameters); - } else if (name.equals("updateKnownNodes")) { - return updateKnownNodes(parameters); + } else if (name.equals("announce")) { + return announce(parameters); + } else if (name.equals("unannounce")) { + return unannounce(parameters); } else if (name.equals("prepareForBootstrap")) { return prepareForBootstrap(parameters); } else if (name.equals("shutdown")) { return shutdownStorageNode(); } else if (name.equals("decommission")) { return decommission(); + } else if (name.equals("uninstall")) { + return uninstall(); } else { return super.invokeOperation(name, parameters); } @@ -304,6 +283,41 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper return result; }
+ private OperationResult uninstall() { + OperationResult result = new OperationResult(); + OperationResult shutdownResult = shutdownIfNecessary(); + if (shutdownResult.getErrorMessage() != null) { + result.setErrorMessage("Failed to shut down storage node: " + shutdownResult.getErrorMessage()); + } else { + File basedir = getBasedir(); + if (basedir.exists()) { + log.info("Purging data directories"); + Configuration pluginConfig = getResourceContext().getPluginConfiguration(); + String yamlProp = pluginConfig.getSimpleValue("yamlConfiguration"); + File yamlFile = new File(yamlProp); + ConfigEditor yamlEditor = new ConfigEditor(yamlFile); + yamlEditor.load(); + purgeDataDirs(yamlEditor); + + log.info("Purging installation directory " + basedir); + purgeDir(basedir); + + log.info("Finished deleting storage node " + getResourceContext().getResourceKey()); + } else { + log.info(basedir + " does not exist. Storage node files have already been purged."); + } + } + return result; + } + + private OperationResult announce(Configuration params) { + return updateKnownNodes(params); + } + + private OperationResult unannounce(Configuration params) { + return updateKnownNodes(params); + } + private OperationResult updateKnownNodes(Configuration params) { OperationResult result = new OperationResult();
diff --git a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml index 95c1723..5159b95 100644 --- a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml +++ b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml @@ -12,8 +12,7 @@ name="RHQ Storage Node" discovery="StorageNodeDiscoveryComponent" class="StorageNodeComponent" - description="RHQ Storage Node" - createDeletePolicy="delete-only"> + description="RHQ Storage Node">
<subcategories> <subcategory name="Client Request Metrics" description="Client Request Metrics"/> @@ -67,7 +66,9 @@ </results> </operation>
- <operation name="decommission" description="Take the Cassandra node out of service"/> + <operation name="decommission" description="Take the storage node out of service"/> + + <operation name="uninstall" description="Removes all of the storage node files from disk"/>
<operation name="readRepair" description="Runs read repair on primary range of rhq and system_auth keyspaces"> <results> @@ -119,7 +120,18 @@ </results> </operation>
- <operation name="updateKnownNodes"> + <operation name="announce"> + <parameters> + <c:list-property name="addresses"> + <c:simple-property name="address"/> + </c:list-property> + </parameters> + <results> + <c:simple-property name="details"/> + </results> + </operation> + + <operation name="unannounce"> <parameters> <c:list-property name="addresses"> <c:simple-property name="address"/>
commit 409f7417f0fb5997bc2e8b8dbe8f5d90749454f5 Author: John Sanda jsanda@redhat.com Date: Fri Aug 16 22:00:19 2013 -0400
adding plugin support for decommissioning and uninstalling a storage node
diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java index a24a219..64d672d 100644 --- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java +++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java @@ -39,6 +39,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.hyperic.sigar.SigarException; import org.mc4j.ems.connection.EmsConnection; +import org.mc4j.ems.connection.EmsInvocationException; import org.mc4j.ems.connection.bean.EmsBean; import org.mc4j.ems.connection.bean.attribute.EmsAttribute; import org.mc4j.ems.connection.bean.operation.EmsOperation; @@ -54,6 +55,7 @@ import org.rhq.core.domain.configuration.PropertyMap; import org.rhq.core.domain.configuration.PropertySimple; import org.rhq.core.pluginapi.configuration.ConfigurationFacet; import org.rhq.core.pluginapi.configuration.ConfigurationUpdateReport; +import org.rhq.core.pluginapi.inventory.DeleteResourceFacet; import org.rhq.core.pluginapi.inventory.ProcessScanResult; import org.rhq.core.pluginapi.inventory.ResourceContext; import org.rhq.core.pluginapi.operation.OperationFacet; @@ -69,7 +71,8 @@ import org.rhq.plugins.cassandra.util.KeyspaceService; /** * @author John Sanda */ -public class StorageNodeComponent extends CassandraNodeComponent implements OperationFacet, ConfigurationFacet { +public class StorageNodeComponent extends CassandraNodeComponent implements OperationFacet, ConfigurationFacet, + DeleteResourceFacet {
private Log log = LogFactory.getLog(StorageNodeComponent.class);
@@ -88,11 +91,54 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper configDelegate.updateResourceConfiguration(configurationUpdateReport); }
+ @Override + public void deleteResource() throws Exception { + OperationResult shutdownResult = shutdownIfNecessary(); + if (shutdownResult.getErrorMessage() != null) { + throw new Exception("Cannot delete storage node [resourceKey: " + getResourceContext().getResourceKey() + + "]: " + shutdownResult.getErrorMessage()); + } + + log.info("Purging data directories"); + Configuration pluginConfig = getResourceContext().getPluginConfiguration(); + String yamlProp = pluginConfig.getSimpleValue("yamlConfiguration"); + File yamlFile = new File(yamlProp); + ConfigEditor yamlEditor = new ConfigEditor(yamlFile); + yamlEditor.load(); + purgeDataDirs(yamlEditor); + + File basedir = getBasedir(); + log.info("Purging installation directory " + basedir); + purgeDir(basedir); + + log.info("Finished deleting storage node " + getResourceContext().getResourceKey()); + } + + private OperationResult shutdownIfNecessary() { + log.info("Shutting down " + getResourceContext().getResourceKey()); + + ProcessInfo process = getResourceContext().getNativeProcess(); + if (process == null) { + File pidFile = new File(getBinDir(), "cassandra.pid"); + if (pidFile.exists()) { + return shutdownStorageNode(); + } else { + return new OperationResult("Storage node is not running"); + } + } else { + return shutdownStorageNode(); + } + } + private File getBasedir() { Configuration pluginConfig = getResourceContext().getPluginConfiguration(); return new File(pluginConfig.getSimpleValue("baseDir")); }
+ private File getBinDir() { + return new File(getBasedir(), "bin"); + } + private File getConfDir() { return new File(getBasedir(), "conf"); } @@ -105,6 +151,8 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper public OperationResult invokeOperation(String name, Configuration parameters) throws Exception { if (name.equals("addNodeMaintenance")) { return nodeAdded(parameters); + } else if (name.equals("removeNodeMaintenance")) { + return nodeRemoved(parameters); } else if (name.equals("prepareForUpgrade")) { return prepareForUpgrade(parameters); } else if (name.equals("readRepair")) { @@ -117,6 +165,8 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper return prepareForBootstrap(parameters); } else if (name.equals("shutdown")) { return shutdownStorageNode(); + } else if (name.equals("decommission")) { + return decommission(); } else { return super.invokeOperation(name, parameters); } @@ -237,6 +287,23 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper return result; }
+ private OperationResult decommission() { + log.info("Decommissioning " + getResourceContext().getResourceKey()); + + OperationResult result = new OperationResult(); + try { + EmsConnection emsConnection = getEmsConnection(); + EmsBean storageService = emsConnection.getBean("org.apache.cassandra.db:type=StorageService"); + Class<?>[] emptyParams = new Class<?>[0]; + + EmsOperation operation = storageService.getOperation("decommission", emptyParams); + operation.invoke((Object[]) emptyParams); + } catch (EmsInvocationException e) { + result.setErrorMessage("Decommission operation failed: " + ThrowableUtil.getAllMessages(e)); + } + return result; + } + private OperationResult updateKnownNodes(Configuration params) { OperationResult result = new OperationResult();
@@ -295,11 +362,7 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper try { configEditor.load();
- purgeDir(new File(configEditor.getCommitLogDirectory())); - for (String dir : configEditor.getDataFileDirectories()) { - purgeDir(new File(dir)); - } - purgeDir(new File(configEditor.getSavedCachesDirectory())); + purgeDataDirs(configEditor);
log.info("Updating cluster settings");
@@ -357,6 +420,14 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper } }
+ private void purgeDataDirs(ConfigEditor configEditor) { + purgeDir(new File(configEditor.getCommitLogDirectory())); + for (String dir : configEditor.getDataFileDirectories()) { + purgeDir(new File(dir)); + } + purgeDir(new File(configEditor.getSavedCachesDirectory())); + } + private void purgeDir(File dir) { log.info("Purging " + dir); FileUtil.purge(dir, true); @@ -377,6 +448,14 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper }
private OperationResult nodeAdded(Configuration params) { + return performTopologyChangeMaintenance(params); + } + + private OperationResult nodeRemoved(Configuration params) { + return performTopologyChangeMaintenance(params); + } + + private OperationResult performTopologyChangeMaintenance(Configuration params) { boolean runRepair = params.getSimple("runRepair").getBooleanValue(); boolean updateSeedsList = params.getSimple("updateSeedsList").getBooleanValue();
diff --git a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml index cc01c9d..95c1723 100644 --- a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml +++ b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml @@ -12,7 +12,8 @@ name="RHQ Storage Node" discovery="StorageNodeDiscoveryComponent" class="StorageNodeComponent" - description="RHQ Storage Node"> + description="RHQ Storage Node" + createDeletePolicy="delete-only">
<subcategories> <subcategory name="Client Request Metrics" description="Client Request Metrics"/> @@ -66,6 +67,8 @@ </results> </operation>
+ <operation name="decommission" description="Take the Cassandra node out of service"/> + <operation name="readRepair" description="Runs read repair on primary range of rhq and system_auth keyspaces"> <results> <c:list-property name="results"> @@ -97,6 +100,25 @@ </results> </operation>
+ <operation name="removeNodeMaintenance"> + <parameters> + <c:simple-property name="runRepair" type="boolean" default="true"/> + <c:simple-property name="updateSeedsList" type="boolean" default="true"/> + <c:list-property name="seedsList"> + <c:simple-property name="seed" type="string"/> + </c:list-property> + </parameters> + <results> + <c:list-property name="results"> + <c:map-property name="resultsMap"> + <c:simple-property name="task" type="string"/> + <c:simple-property name="succeeded" type="boolean"/> + <c:simple-property name="details" type="string"/> + </c:map-property> + </c:list-property> + </results> + </operation> + <operation name="updateKnownNodes"> <parameters> <c:list-property name="addresses">
commit e204ed6106c0050ad4ef689e1178c4601cb30177 Author: mtho11 mikecthompson@gmail.com Date: Thu Aug 15 21:56:44 2013 -0700
Fix refresh to keep graph open after auto-refresh.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceDetailView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceDetailView.java index ae51195..9bd1ea6 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceDetailView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/ResourceDetailView.java @@ -114,6 +114,8 @@ public class ResourceDetailView extends
private ResourceComposite resourceComposite;
+ private MetricsResourceView metricsResourceView; + //private List<ResourceSelectListener> selectListeners = new ArrayList<ResourceSelectListener>();
private TwoLevelTab summaryTab; @@ -391,7 +393,10 @@ public class ResourceDetailView extends viewFactory = (!visible) ? null : new ViewFactory() { @Override public Canvas createView() { - return new MetricsResourceView(resource); + if(null == metricsResourceView){ + metricsResourceView = new MetricsResourceView(resource); + } + return metricsResourceView; } }; updateSubTab(this.monitoringTab, this.monitorMetrics, visible, visibleToIE8, viewFactory); diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java index db9abd4..242efe0 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java @@ -81,6 +81,7 @@ import org.rhq.enterprise.gui.coregui.client.util.preferences.MeasurementUserPre public class MetricsTableView extends Table<MetricsViewDataSource> implements Refreshable {
private final Resource resource; + private boolean rendered = false; private final AbstractD3GraphListView abstractD3GraphListView;
private final MeasurementUserPreferences measurementUserPrefs; @@ -115,19 +116,22 @@ public class MetricsTableView extends Table<MetricsViewDataSource> implements Re ArrayList<ListGridField> fields = getDataSource().getListGridFields(); setListGridFields(fields.toArray(new ListGridField[0]));
- addTableAction(MSG.view_measureTable_getLive(), new ShowLiveDataTableAction(this)); - addExtraWidget(addToDashboardComponent, false); - addToDashboardComponent.disableAddToDashboardButton(); - metricsTableListGrid.addSelectionChangedHandler(new SelectionChangedHandler() { - @Override - public void onSelectionChanged(SelectionEvent selectionEvent) { - if (metricsTableListGrid.getSelectedRecords().length > 0) { - addToDashboardComponent.enableAddToDashboardButton(); - } else { - addToDashboardComponent.disableAddToDashboardButton(); + if(!rendered){ + addTableAction(MSG.view_measureTable_getLive(), new ShowLiveDataTableAction(this)); + addExtraWidget(addToDashboardComponent, false); + addToDashboardComponent.disableAddToDashboardButton(); + metricsTableListGrid.addSelectionChangedHandler(new SelectionChangedHandler() { + @Override + public void onSelectionChanged(SelectionEvent selectionEvent) { + if (metricsTableListGrid.getSelectedRecords().length > 0) { + addToDashboardComponent.enableAddToDashboardButton(); + } else { + addToDashboardComponent.disableAddToDashboardButton(); + } } - } - }); + }); + rendered = true; + } }
private static class ShowLiveDataTableAction implements TableAction {
commit 5ce350ae60dd09e338bcaf795dd929a0394affde Author: Jirka Kremser jkremser@redhat.com Date: Fri Aug 16 15:50:36 2013 +0200
Adding new component for editing the storage cluster configuration (stored in the system settings).
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageClusterSettings.java b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageClusterSettings.java new file mode 100644 index 0000000..661ad18 --- /dev/null +++ b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageClusterSettings.java @@ -0,0 +1,56 @@ +package org.rhq.core.domain.cloud; + +import java.io.Serializable; + +/** + * @author John Sanda + */ +public class StorageClusterSettings implements Serializable { + + private static final long serialVersionUID = 1; + + private int cqlPort; + + private int gossipPort; + + public int getCqlPort() { + return cqlPort; + } + + public void setCqlPort(int cqlPort) { + this.cqlPort = cqlPort; + } + + public int getGossipPort() { + return gossipPort; + } + + public void setGossipPort(int gossipPort) { + this.gossipPort = gossipPort; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + StorageClusterSettings that = (StorageClusterSettings) o; + + if (cqlPort != that.cqlPort) return false; + if (gossipPort != that.gossipPort) return false; + + return true; + } + + @Override + public int hashCode() { + int result = cqlPort; + result = 29 * result + gossipPort; + return result; + } + + @Override + public String toString() { + return "StorageClusterSettings[cqlPort=" + cqlPort + ", gossipPort=" + gossipPort + "]"; + } +} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/ClusterConfigurationEditor.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/ClusterConfigurationEditor.java new file mode 100644 index 0000000..bd11d03 --- /dev/null +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/ClusterConfigurationEditor.java @@ -0,0 +1,238 @@ +/* + * RHQ Management Platform + 
* Copyright (C) 2005-2013 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ +package org.rhq.enterprise.gui.coregui.client.admin.storage; + +import java.util.ArrayList; +import java.util.List; + +import com.google.gwt.user.client.rpc.AsyncCallback; +import com.smartgwt.client.types.Alignment; +import com.smartgwt.client.types.Overflow; +import com.smartgwt.client.util.BooleanCallback; +import com.smartgwt.client.util.SC; +import com.smartgwt.client.widgets.events.ClickEvent; +import com.smartgwt.client.widgets.events.ClickHandler; +import com.smartgwt.client.widgets.form.fields.FormItem; +import com.smartgwt.client.widgets.form.fields.StaticTextItem; +import com.smartgwt.client.widgets.form.fields.TextItem; +import com.smartgwt.client.widgets.form.validator.IsIntegerValidator; +import com.smartgwt.client.widgets.form.validator.Validator; +import com.smartgwt.client.widgets.layout.LayoutSpacer; +import com.smartgwt.client.widgets.toolbar.ToolStrip; + +import org.rhq.core.domain.cloud.StorageClusterSettings; +import org.rhq.enterprise.gui.coregui.client.CoreGUI; +import org.rhq.enterprise.gui.coregui.client.RefreshableView; +import org.rhq.enterprise.gui.coregui.client.components.form.EnhancedDynamicForm; +import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; +import 
org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedIButton; +import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedToolStrip; +import org.rhq.enterprise.gui.coregui.client.util.enhanced.EnhancedVLayout; +import org.rhq.enterprise.gui.coregui.client.util.message.Message; + +/** + * The component for editing the cluster wide configuration + * + * @author Jirka Kremser + */ +public class ClusterConfigurationEditor extends EnhancedVLayout implements RefreshableView { + + private EnhancedDynamicForm form; + private EnhancedIButton saveButton; + private boolean oddRow; + private StorageClusterSettings settings; + + private static String FIELD_CQL_PORT = "cql_port"; + private static String FIELD_GOSSIP_PORT = "gossip_port"; + + public ClusterConfigurationEditor() { + super(); + } + + private void fetchClusterSettings() { + GWTServiceLookup.getStorageService().retrieveClusterSettings( + new AsyncCallback<StorageClusterSettings>() { + @Override + public void onFailure(Throwable caught) { + Message message = new Message(MSG.view_configurationHistoryDetails_error_loadFailure(), + Message.Severity.Warning); + } + + @Override + public void onSuccess(StorageClusterSettings settings) { + ClusterConfigurationEditor.this.settings = settings; + prepareForm(); + } + }); + } + + private void save() { + updateSettings(); + GWTServiceLookup.getStorageService().updateClusterSettings(settings, new AsyncCallback<Void>() { + public void onSuccess(Void result) { + Message msg = new Message("Storage node settings were successfully updated.", Message.Severity.Info); + CoreGUI.getMessageCenter().notify(msg); + } + + public void onFailure(Throwable caught) { + CoreGUI.getErrorHandler().handleError("Unable to update the storage node settings.", caught); + } + }); + } + + private List<FormItem> buildOneFormRowWithValidator(String name, String title, String value, String description, + Validator validator) { + return buildOneFormRow(name, title, value, description, false, 
validator); + } + + private List<FormItem> buildOneFormRow(String name, String title, String value, String description, + boolean unitsDropdown, Validator validator) { + List<FormItem> fields = new ArrayList<FormItem>(); + StaticTextItem nameItem = new StaticTextItem(); + nameItem.setStartRow(true); + nameItem.setValue("<b>" + title + "</b>"); + nameItem.setShowTitle(false); + nameItem.setCellStyle(oddRow ? "OddRow" : "EvenRow"); + fields.add(nameItem); + + FormItem valueItem = null; + valueItem = new TextItem(); + valueItem.setName(name); + valueItem.setValue(value); + valueItem.setWidth(220); + if (validator != null) { + valueItem.setValidators(validator); + } + valueItem.setValidateOnChange(true); + valueItem.setAlign(Alignment.CENTER); + valueItem.setShowTitle(false); + valueItem.setRequired(true); + valueItem.setCellStyle(oddRow ? "OddRow" : "EvenRow"); + fields.add(valueItem); + + StaticTextItem descriptionItem = new StaticTextItem(); + descriptionItem.setValue(description); + descriptionItem.setShowTitle(false); + descriptionItem.setEndRow(true); + descriptionItem.setCellStyle(oddRow ? 
"OddRow" : "EvenRow"); + fields.add(descriptionItem); + + oddRow = !oddRow; + return fields; + } + + private List<FormItem> buildHeaderItems() { + List<FormItem> fields = new ArrayList<FormItem>(); + fields.add(createHeaderTextItem(MSG.view_configEdit_property())); + fields.add(createHeaderTextItem(MSG.common_title_value())); + fields.add(createHeaderTextItem(MSG.common_title_description())); + return fields; + } + + private StaticTextItem createHeaderTextItem(String value) { + StaticTextItem unsetHeader = new StaticTextItem(); + unsetHeader.setValue(value); + unsetHeader.setShowTitle(false); + unsetHeader.setCellStyle("configurationEditorHeaderCell"); + return unsetHeader; + } + + @Override + protected void onDraw() { + super.onDraw(); + refresh(); + } + + private void prepareForm() { + form = new EnhancedDynamicForm(); + form.setHiliteRequiredFields(true); + form.setNumCols(3); + form.setCellPadding(5); + form.setColWidths(190, 220, "*"); + form.setIsGroup(true); + form.setGroupTitle("Cluster Wide Settings"); + form.setBorder("1px solid #AAA"); + oddRow = true; + + List<FormItem> items = buildHeaderItems(); +// IntegerRangeValidator positiveInteger = new IntegerRangeValidator(); +// positiveInteger.setMin(1); +// positiveInteger.setMax(Integer.MAX_VALUE); + IsIntegerValidator validator = new IsIntegerValidator(); + items.addAll(buildOneFormRowWithValidator(FIELD_CQL_PORT, "CQL Port", String.valueOf(settings.getCqlPort()), + "The port on which the Storage Nodes listens for CQL client connections.", validator)); + +// IntegerRangeValidator portValidator = new IntegerRangeValidator(); +// portValidator.setMin(1); +// portValidator.setMax(65535); // (1 << 16) - 1 + validator = new IsIntegerValidator(); + items.addAll(buildOneFormRowWithValidator(FIELD_GOSSIP_PORT, "Gossip Port", String.valueOf(settings.getGossipPort()), + "The port used for internode communication. 
This is a shared, cluster-wide setting.", validator)); + form.setFields(items.toArray(new FormItem[items.size()])); + form.setWidth100(); + form.setOverflow(Overflow.VISIBLE); + setWidth100(); + + LayoutSpacer spacer = new LayoutSpacer(); + spacer.setWidth100(); + + ToolStrip toolStrip = buildToolStrip(); + setMembers(form, spacer, toolStrip); + form.validate(); + markForRedraw(); + } + + @Override + public void refresh() { + fetchClusterSettings(); + } + + private EnhancedToolStrip buildToolStrip() { + saveButton = new EnhancedIButton(MSG.common_button_save()); + saveButton.addClickHandler(new ClickHandler() { + public void onClick(ClickEvent clickEvent) { + if (form.validate()) { + SC.ask( + "Changing the cluster wide configuration will eventually affect all the storage nodes. Do you want to continue?", + new BooleanCallback() { + @Override + public void execute(Boolean value) { + if (value) { + save(); + } + } + }); + } + } + }); + EnhancedToolStrip toolStrip = new EnhancedToolStrip(); + toolStrip.setWidth100(); + toolStrip.setMembersMargin(5); + toolStrip.setLayoutMargin(5); + toolStrip.addMember(saveButton); + + return toolStrip; + } + + private StorageClusterSettings updateSettings() { + settings.setCqlPort(Integer.parseInt(form.getValueAsString(FIELD_CQL_PORT))); + settings.setGossipPort(Integer.parseInt(form.getValueAsString(FIELD_GOSSIP_PORT))); + return settings; + } +} diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java index c99cf70c..d6a91cb 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeAdminView.java @@ -18,26 +18,15 @@ */ package 
org.rhq.enterprise.gui.coregui.client.admin.storage;
-import java.util.ArrayList; import java.util.EnumSet;
import com.google.gwt.user.client.Timer; import com.google.gwt.user.client.rpc.AsyncCallback; -import com.smartgwt.client.data.DataSourceField; -import com.smartgwt.client.types.GroupStartOpen; import com.smartgwt.client.widgets.Label; -import com.smartgwt.client.widgets.grid.CellFormatter; -import com.smartgwt.client.widgets.grid.ListGrid; -import com.smartgwt.client.widgets.grid.ListGridField; -import com.smartgwt.client.widgets.grid.ListGridRecord; import com.smartgwt.client.widgets.tab.events.TabSelectedEvent; import com.smartgwt.client.widgets.tab.events.TabSelectedHandler;
-import org.rhq.core.domain.criteria.AlertCriteria; -import org.rhq.core.domain.criteria.ResourceGroupCriteria; import org.rhq.core.domain.resource.ResourceType; -import org.rhq.core.domain.resource.group.composite.ResourceGroupComposite; -import org.rhq.core.domain.util.PageList; import org.rhq.core.domain.util.collection.ArrayUtils; import org.rhq.enterprise.gui.coregui.client.BookmarkableView; import org.rhq.enterprise.gui.coregui.client.CoreGUI; @@ -45,16 +34,10 @@ import org.rhq.enterprise.gui.coregui.client.IconEnum; import org.rhq.enterprise.gui.coregui.client.LinkManager; import org.rhq.enterprise.gui.coregui.client.ViewPath; import org.rhq.enterprise.gui.coregui.client.admin.AdministrationView; -import org.rhq.enterprise.gui.coregui.client.alert.AlertDataSource; -import org.rhq.enterprise.gui.coregui.client.alert.AlertHistoryView; import org.rhq.enterprise.gui.coregui.client.components.tab.NamedTab; import org.rhq.enterprise.gui.coregui.client.components.tab.NamedTabSet; -import org.rhq.enterprise.gui.coregui.client.components.table.Table; -import org.rhq.enterprise.gui.coregui.client.components.view.HasViewName; import org.rhq.enterprise.gui.coregui.client.components.view.ViewName; import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup; -import org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.configuration.GroupResourceConfigurationEditView; -import org.rhq.enterprise.gui.coregui.client.inventory.resource.AncestryUtil; import org.rhq.enterprise.gui.coregui.client.inventory.resource.type.ResourceTypeRepository; import org.rhq.enterprise.gui.coregui.client.util.Log; import org.rhq.enterprise.gui.coregui.client.util.StringUtility; @@ -66,7 +49,7 @@ import org.rhq.enterprise.gui.coregui.client.util.message.Message; * * @author Jirka Kremser */ -public class StorageNodeAdminView extends EnhancedVLayout implements/* HasViewName,*/ BookmarkableView { +public class StorageNodeAdminView extends EnhancedVLayout implements BookmarkableView {
public static final ViewName VIEW_ID = new ViewName("StorageNodes", MSG.view_adminTopology_storageNodes(), IconEnum.STORAGE_NODE); @@ -74,7 +57,7 @@ public class StorageNodeAdminView extends EnhancedVLayout implements/* HasViewNa public static final String VIEW_PATH = AdministrationView.VIEW_ID + "/" + AdministrationView.SECTION_TOPOLOGY_VIEW_ID + "/" + VIEW_ID;
- private static final String GROUP_NAME = "RHQ Storage Nodes"; +// private static final String GROUP_NAME = "RHQ Storage Nodes";
private final NamedTabSet tabset; private TabInfo tableTabInfo = new TabInfo(0, new ViewName("Nodes")); @@ -158,31 +141,36 @@ public class StorageNodeAdminView extends EnhancedVLayout implements/* HasViewNa }); } } else if (tabInfo.equals(settingsTabInfo)) { - ResourceGroupCriteria criteria = new ResourceGroupCriteria(); - criteria.addFilterName(GROUP_NAME); - criteria.setStrict(true); - GWTServiceLookup.getResourceGroupService().findResourceGroupCompositesByCriteria(criteria, - new AsyncCallback<PageList<ResourceGroupComposite>>() { - @Override - public void onFailure(Throwable caught) { - Message message = new Message(MSG.view_group_detail_failLoadComp(String.valueOf(GROUP_NAME)), - Message.Severity.Warning); - CoreGUI.goToView(VIEW_ID.getName(), message); - } - - @Override - public void onSuccess(PageList<ResourceGroupComposite> result) { - if (result.isEmpty()) { - onFailure(new Exception("Group with name [" + GROUP_NAME + "] does not exist.")); - } else { - ResourceGroupComposite groupComposite = result.get(0); - loadResourceType(groupComposite.getResourceGroup().getResourceType().getId()); - tabset.getTabByName(tabInfo.name.getName()).setPane( - new GroupResourceConfigurationEditView(groupComposite)); - tabset.selectTab(tabInfo.index); - } - } - }); + ClusterConfigurationEditor editor = new ClusterConfigurationEditor(); + tabset.getTabByName(tabInfo.name.getName()).setPane(editor); + tabset.selectTab(tabInfo.index); + + // we don't group configuration editor anymore +// ResourceGroupCriteria criteria = new ResourceGroupCriteria(); +// criteria.addFilterName(GROUP_NAME); +// criteria.setStrict(true); +// GWTServiceLookup.getResourceGroupService().findResourceGroupCompositesByCriteria(criteria, +// new AsyncCallback<PageList<ResourceGroupComposite>>() { +// @Override +// public void onFailure(Throwable caught) { +// Message message = new Message(MSG.view_group_detail_failLoadComp(String.valueOf(GROUP_NAME)), +// Message.Severity.Warning); +// 
CoreGUI.goToView(VIEW_ID.getName(), message); +// } +// +// @Override +// public void onSuccess(PageList<ResourceGroupComposite> result) { +// if (result.isEmpty()) { +// onFailure(new Exception("Group with name [" + GROUP_NAME + "] does not exist.")); +// } else { +// ResourceGroupComposite groupComposite = result.get(0); +// loadResourceType(groupComposite.getResourceGroup().getResourceType().getId()); +// tabset.getTabByName(tabInfo.name.getName()).setPane( +// new GroupResourceConfigurationEditView(groupComposite)); +// tabset.selectTab(tabInfo.index); +// } +// } +// }); } }
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java index 38829a5..72f17b18 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/gwt/StorageGWTService.java @@ -27,6 +27,7 @@ import java.util.Map;
import com.google.gwt.user.client.rpc.RemoteService;
+import org.rhq.core.domain.cloud.StorageClusterSettings; import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.domain.cloud.StorageNodeConfigurationComposite; import org.rhq.core.domain.cloud.StorageNodeLoadComposite; @@ -87,4 +88,8 @@ public interface StorageGWTService extends RemoteService { StorageNodeConfigurationComposite retrieveConfiguration(StorageNode storageNode) throws RuntimeException;
void updateConfiguration(StorageNodeConfigurationComposite storageNodeConfiguration) throws RuntimeException; + + StorageClusterSettings retrieveClusterSettings() throws RuntimeException; + + void updateClusterSettings(StorageClusterSettings clusterSettings) throws RuntimeException; } diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java index b7437e3..ae18075 100644 --- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java +++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/server/gwt/StorageGWTServiceImpl.java @@ -26,6 +26,7 @@ import java.util.ArrayList; import java.util.List; import java.util.Map;
+import org.rhq.core.domain.cloud.StorageClusterSettings; import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.domain.cloud.StorageNodeConfigurationComposite; import org.rhq.core.domain.cloud.StorageNodeLoadComposite; @@ -40,6 +41,7 @@ import org.rhq.enterprise.server.cloud.StorageNodeManagerLocal; import org.rhq.enterprise.server.measurement.util.MeasurementUtils; import org.rhq.enterprise.server.operation.OperationManagerLocal; import org.rhq.enterprise.server.resource.ResourceManagerLocal; +import org.rhq.enterprise.server.storage.StorageClusterSettingsManagerLocal; import org.rhq.enterprise.server.util.LookupUtil;
/** @@ -51,6 +53,8 @@ public class StorageGWTServiceImpl extends AbstractGWTServiceImpl implements Sto
private StorageNodeManagerLocal storageNodeManager = LookupUtil.getStorageNodeManager();
+ private StorageClusterSettingsManagerLocal storageClusterSettingsManager = LookupUtil.getStorageClusterSettingsManagerLocal(); + private OperationManagerLocal operationManager = LookupUtil.getOperationManager();
private ResourceManagerLocal resourceManager = LookupUtil.getResourceManager(); @@ -179,4 +183,23 @@ public class StorageGWTServiceImpl extends AbstractGWTServiceImpl implements Sto throw getExceptionToThrowToClient(t); } } + + @Override + public void updateClusterSettings(StorageClusterSettings clusterSettings) throws RuntimeException { + try { + storageClusterSettingsManager.setClusterSettings(getSessionSubject(), clusterSettings); + } catch (Throwable t) { + throw getExceptionToThrowToClient(t); + } + } + + @Override + public StorageClusterSettings retrieveClusterSettings() throws RuntimeException { + try { + return SerialUtility.prepare(storageClusterSettingsManager.getClusterSettings(getSessionSubject()), + "StorageGWTServiceImpl.retrieveClusterSettings"); + } catch (Throwable t) { + throw getExceptionToThrowToClient(t); + } + } } diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/storage/FakeStorageClusterSettingsManagerBean.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/storage/FakeStorageClusterSettingsManagerBean.java index 3ac61e4..c9738a7 100644 --- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/storage/FakeStorageClusterSettingsManagerBean.java +++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/storage/FakeStorageClusterSettingsManagerBean.java @@ -3,6 +3,7 @@ package org.rhq.enterprise.server.storage; import javax.ejb.Stateless;
import org.rhq.core.domain.auth.Subject; +import org.rhq.core.domain.cloud.StorageClusterSettings;
/** * @author John Sanda diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 861e3fa..5cce984 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -53,6 +53,7 @@ import org.rhq.core.domain.auth.Subject; import org.rhq.core.domain.authz.Permission; import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.domain.cloud.StorageNode.OperationMode; +import org.rhq.core.domain.cloud.StorageClusterSettings; import org.rhq.core.domain.cloud.StorageNodeConfigurationComposite; import org.rhq.core.domain.cloud.StorageNodeLoadComposite; import org.rhq.core.domain.common.JobTrigger; @@ -84,7 +85,6 @@ import org.rhq.enterprise.server.resource.ResourceManagerLocal; import org.rhq.enterprise.server.resource.ResourceTypeManagerLocal; import org.rhq.enterprise.server.rest.reporting.MeasurementConverter; import org.rhq.enterprise.server.scheduler.SchedulerLocal; -import org.rhq.enterprise.server.storage.StorageClusterSettings; import org.rhq.enterprise.server.storage.StorageClusterSettingsManagerLocal; import org.rhq.enterprise.server.storage.StorageNodeOperationsHandlerLocal; import org.rhq.enterprise.server.util.CriteriaQueryGenerator; diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettings.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettings.java deleted file mode 100644 index 2098acd..0000000 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettings.java +++ /dev/null @@ -1,56 +0,0 @@ -package org.rhq.enterprise.server.storage; - -import java.io.Serializable; - -/** 
- * @author John Sanda - */ -public class StorageClusterSettings implements Serializable { - - private static final long serialVersionUID = 1; - - private int cqlPort; - - private int gossipPort; - - public int getCqlPort() { - return cqlPort; - } - - public void setCqlPort(int cqlPort) { - this.cqlPort = cqlPort; - } - - public int getGossipPort() { - return gossipPort; - } - - public void setGossipPort(int gossipPort) { - this.gossipPort = gossipPort; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - StorageClusterSettings that = (StorageClusterSettings) o; - - if (cqlPort != that.cqlPort) return false; - if (gossipPort != that.gossipPort) return false; - - return true; - } - - @Override - public int hashCode() { - int result = cqlPort; - result = 29 * result + gossipPort; - return result; - } - - @Override - public String toString() { - return "StorageClusterSettings[cqlPort=" + cqlPort + ", gossipPort=" + gossipPort + "]"; - } -} diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettingsManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettingsManagerBean.java index 9418bca..64fb310 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettingsManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettingsManagerBean.java @@ -6,8 +6,11 @@ import javax.ejb.EJB; import javax.ejb.Stateless;
import org.rhq.core.domain.auth.Subject; +import org.rhq.core.domain.authz.Permission; +import org.rhq.core.domain.cloud.StorageClusterSettings; import org.rhq.core.domain.common.composite.SystemSetting; import org.rhq.core.domain.common.composite.SystemSettings; +import org.rhq.enterprise.server.authz.RequiredPermission; import org.rhq.enterprise.server.system.SystemManagerLocal;
/** @@ -20,6 +23,7 @@ public class StorageClusterSettingsManagerBean implements StorageClusterSettings private SystemManagerLocal systemManager;
@Override + @RequiredPermission(Permission.MANAGE_SETTINGS) public StorageClusterSettings getClusterSettings(Subject subject) { SystemSettings settings = systemManager.getSystemSettings(subject); Map<String, String> settingsMap = settings.toMap(); @@ -43,6 +47,7 @@ public class StorageClusterSettingsManagerBean implements StorageClusterSettings }
@Override + @RequiredPermission(Permission.MANAGE_SETTINGS) public void setClusterSettings(Subject subject, StorageClusterSettings clusterSettings) { SystemSettings settings = new SystemSettings(); settings.put(SystemSetting.STORAGE_CQL_PORT, Integer.toString(clusterSettings.getCqlPort())); diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettingsManagerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettingsManagerLocal.java index cb63bc4..f98cccc 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettingsManagerLocal.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageClusterSettingsManagerLocal.java @@ -3,6 +3,7 @@ package org.rhq.enterprise.server.storage; import javax.ejb.Local;
import org.rhq.core.domain.auth.Subject; +import org.rhq.core.domain.cloud.StorageClusterSettings;
/** * @author John Sanda diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java index 2a79c59..4969c46 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java @@ -17,6 +17,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory;
import org.rhq.core.domain.auth.Subject; +import org.rhq.core.domain.cloud.StorageClusterSettings; import org.rhq.core.domain.cloud.StorageNode; import org.rhq.core.domain.common.JobTrigger; import org.rhq.core.domain.configuration.Configuration; diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/util/LookupUtil.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/util/LookupUtil.java index 5aded7d..3cde894 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/util/LookupUtil.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/util/LookupUtil.java @@ -188,8 +188,10 @@ import org.rhq.enterprise.server.scheduler.SchedulerBean; import org.rhq.enterprise.server.scheduler.SchedulerLocal; import org.rhq.enterprise.server.search.SavedSearchManagerBean; import org.rhq.enterprise.server.search.SavedSearchManagerLocal; -import org.rhq.enterprise.server.storage.StorageNodeOperationsHandlerLocal; +import org.rhq.enterprise.server.storage.StorageClusterSettingsManagerBean; +import org.rhq.enterprise.server.storage.StorageClusterSettingsManagerLocal; import org.rhq.enterprise.server.storage.StorageNodeOperationsHandlerBean; +import org.rhq.enterprise.server.storage.StorageNodeOperationsHandlerLocal; import org.rhq.enterprise.server.subsystem.AlertSubsystemManagerBean; import org.rhq.enterprise.server.subsystem.AlertSubsystemManagerLocal; import org.rhq.enterprise.server.subsystem.ConfigurationSubsystemManagerBean; @@ -492,6 +494,10 @@ public final class LookupUtil { public static StorageNodeOperationsHandlerLocal getStorageNodeOperationsHandler() { return lookupLocal(StorageNodeOperationsHandlerBean.class); } + + public static StorageClusterSettingsManagerLocal getStorageClusterSettingsManagerLocal() { + return lookupLocal(StorageClusterSettingsManagerBean.class); + }
public static ClusterManagerLocal getClusterManager() { return lookupLocal(ClusterManagerBean.class);
commit dcec800ff2597f4aede87e13927b8ec83f68fee6 Author: John Sanda jsanda@redhat.com Date: Thu Aug 15 16:31:49 2013 -0400
updating api checks
diff --git a/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml b/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml index 08e793c..58bd618 100644 --- a/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml +++ b/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml @@ -65,6 +65,13 @@ </difference>
<difference> + <className>org/rhq/enterprise/server/cloud/StorageNodeManagerRemote</className> + <differenceType>7012</differenceType> <!-- method added to an interface --> + <method>void deployStorageNode(org.rhq.core.domain.auth.Subject, org.rhq.core.domain.cloud.StorageNode)</method> + <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> + </difference> + + <difference> <className>org/rhq/enterprise/server/bundle/BundleManagerRemote</className> <differenceType>7012</differenceType> <!-- method added to an interface --> <method>void assignBundlesToBundleGroup(org.rhq.core.domain.auth.Subject, int, int[])</method>
commit e2ba1f5cada1f6b4fda5dd1b11fb75f0be1138b0 Author: John Sanda jsanda@redhat.com Date: Thu Aug 15 01:56:04 2013 -0400
add maintenance flag for queueing up storage nodes to be processed
diff --git a/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml b/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml index 9633be5..2c48bbd 100644 --- a/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml +++ b/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml @@ -2212,6 +2212,7 @@ </schemaSpec>
<schemaSpec version="2.137"> + <schema-addColumn table="RHQ_STORAGE_NODE" column="MAINTENANCE_PENDING" columnType="BOOLEAN"/> <schema-addColumn table="RHQ_STORAGE_NODE" column="ERROR_MSG" columnType="LONGVARCHAR"/> <schema-addColumn table="RHQ_STORAGE_NODE" column="RESOURCE_OP_HIST_ID" columnType="INTEGER"/> <schema-directSQL> @@ -2221,6 +2222,12 @@ FOREIGN KEY (RESOURCE_OP_HIST_ID) REFERENCES RHQ_OPERATION_HISTORY (ID) </statement> + <statement targetDBVendor="postgresql" desc="Set maintenance_pending flag to false for existing storage nodes"> + UPDATE RHQ_STORAGE_NODE SET MAINTENANCE_PENDING = false + </statement> + <statement targetDBVendor="oracle" desc="Set maintenance_pending flag to false for existing storage nodes"> + UPDATE RHQ_STORAGE_NODE SET MAINTENANCE_PENDING = 0 + </statement> </schema-directSQL> </schemaSpec>
commit 9b4f45e77db9a437cdc861359fab77bc8ef2c127 Author: John Sanda jsanda@redhat.com Date: Wed Aug 14 23:02:34 2013 -0400
adding more error handling for storage node deployments
In my previous commit I added code to persist resource operation failures that occur during storage node deployment. This commit adds error handling for unexpected server side errors. Errors are logged to the StorageNode entity in a separate transaction to ensure that the error message gets persisted.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeDeploymentException.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeDeploymentException.java deleted file mode 100644 index fca6e96..0000000 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeDeploymentException.java +++ /dev/null @@ -1,22 +0,0 @@ -package org.rhq.enterprise.server.storage; - -/** - * @author John Sanda - */ -public class StorageNodeDeploymentException extends RuntimeException { - - public StorageNodeDeploymentException() { - } - - public StorageNodeDeploymentException(String message) { - super(message); - } - - public StorageNodeDeploymentException(String message, Throwable cause) { - super(message, cause); - } - - public StorageNodeDeploymentException(Throwable cause) { - super(cause); - } -}
commit 414d99725afd354290d44295e00dc8bf1a974025 Author: John Sanda jsanda@redhat.com Date: Wed Aug 14 16:13:18 2013 -0400
add fields in StorageNode for error reporting during maintenance
Storage node deployment and undeployment consists of a series of different resource operations. The (un)deployment work flow could fail due to one of those resource operations. When that occurs we can provide a direct link in the StorageNode.failedOperation field to the operation history of the failed operation. This direct link will help with providing quick insight into the cause of the failure.
There is also a new errorMessage field in StorageNode. This field will provide summary info about the failure. If the failure is in server side processing and not in a resource operation, then the errorMessage field should be set but not the failedOperation field.
diff --git a/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml b/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml index 949ca4b..9633be5 100644 --- a/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml +++ b/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml @@ -2211,6 +2211,19 @@ </schema-directSQL> </schemaSpec>
+ <schemaSpec version="2.137"> + <schema-addColumn table="RHQ_STORAGE_NODE" column="ERROR_MSG" columnType="LONGVARCHAR"/> + <schema-addColumn table="RHQ_STORAGE_NODE" column="RESOURCE_OP_HIST_ID" columnType="INTEGER"/> + <schema-directSQL> + <statement desc="Creating RHQ_STORAGE_NODE foreign key to RHQ_OPERATION_HISTORY"> + ALTER TABLE RHQ_STORAGE_NODE + ADD CONSTRAINT RHQ_SN_OP_HIST_ID_FK + FOREIGN KEY (RESOURCE_OP_HIST_ID) + REFERENCES RHQ_OPERATION_HISTORY (ID) + </statement> + </schema-directSQL> + </schemaSpec> + </dbupgrade> </target> </project>
commit 7440eb0bd4e594802ee492c86b34e6043186c8f4 Author: John Sanda jsanda@redhat.com Date: Wed Aug 14 12:08:13 2013 -0400
refactoring state transitions and adding method for deployment
When a storage node is committed into inventory its operation mode is set to INSTALLED, unless the storage node entity exists in which case the mode is set to NORMAL. After creating the storage node entity, deployment is started. The operation mode changes to ANNOUNCE. The address of the new node is announced to existing cluster nodes. After announcing completes, the operation mode changes to BOOTSTRAP, and the prepareForBootstrap operation is run on the new node. When the new node is reported up as part of the cluster, the operation mode of all cluster nodes is set to ADD_NODE_MAINTENANCE. The addNodeMaintenance operation is then run on each storage node. When that operation completes, the node's operation mode is set back to NORMAL.
The StorageNodeManagerBean.deployStorageNode method looks at the operation mode of the node to determine at what step in the process to start the deployment. The deployStorageNode method is the only method that the UI or remote API will need to invoke to start or resume a deployment.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeDeploymentException.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeDeploymentException.java new file mode 100644 index 0000000..fca6e96 --- /dev/null +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeDeploymentException.java @@ -0,0 +1,22 @@ +package org.rhq.enterprise.server.storage; + +/** + * @author John Sanda + */ +public class StorageNodeDeploymentException extends RuntimeException { + + public StorageNodeDeploymentException() { + } + + public StorageNodeDeploymentException(String message) { + super(message); + } + + public StorageNodeDeploymentException(String message, Throwable cause) { + super(message, cause); + } + + public StorageNodeDeploymentException(Throwable cause) { + super(cause); + } +}
commit e696283aacbf34052bce0777af52ac4ee6e1e6bd Author: Stefan Negrea snegrea@redhat.com Date: Wed Aug 14 15:44:07 2013 -0500
Fixing errors after rebase merge.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index cb62d9c..861e3fa 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -153,7 +153,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN private ResourceManagerLocal resourceManager;
@EJB - private StorageClusterSettingsManagerBean storageClusterSettingsManager; + private StorageClusterSettingsManagerLocal storageClusterSettingsManager;
@EJB private StorageNodeOperationsHandlerLocal storageNodeOperationsHandler; @@ -640,7 +640,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
return configuration; } - + @Override @Asynchronous public void updateConfigurationAsync(Subject subject, StorageNodeConfigurationComposite storageNodeConfiguration) {
commit a67e1845348543949806de7061b198998c92af27 Author: Stefan Negrea snegrea@redhat.com Date: Wed Aug 14 14:37:26 2013 -0500
[BZ 991598] Add basic node configuration validation to the composite class. It validates heap settings as well as port settings.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 35eec41..cb62d9c 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -153,7 +153,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN private ResourceManagerLocal resourceManager;
@EJB - private StorageClusterSettingsManagerLocal storageClusterSettingsManager; + private StorageClusterSettingsManagerBean storageClusterSettingsManager;
@EJB private StorageNodeOperationsHandlerLocal storageNodeOperationsHandler; @@ -590,7 +590,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
@Override public Integer[] findResourcesWithAlertDefinitions(StorageNode storageNode) { - List<StorageNode> initialStorageNodes = null; + List<StorageNode> initialStorageNodes = getStorageNodes(); if (storageNode == null) { initialStorageNodes = getStorageNodes(); } else { @@ -640,7 +640,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
return configuration; } - + @Override @Asynchronous public void updateConfigurationAsync(Subject subject, StorageNodeConfigurationComposite storageNodeConfiguration) {
commit 159290206de43cdb6bac84fd4424cad7f8b31e05 Author: jfclere jfclere@neo2.gva.redhat.com Date: Mon Apr 15 17:07:28 2013 +0200
[BZ 865460] Cannot add a Group to tomcat's UserDatabase
diff --git a/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatUserDatabaseComponent.java b/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatUserDatabaseComponent.java index 0a95069..ead4956 100644 --- a/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatUserDatabaseComponent.java +++ b/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatUserDatabaseComponent.java @@ -51,6 +51,7 @@ public class TomcatUserDatabaseComponent extends MBeanResourceComponent<TomcatSe if (TomcatGroupComponent.RESOURCE_TYPE_NAME.equals(resourceTypeName)) { name = report.getResourceConfiguration().getSimple("groupname").getStringValue(); newRoles = report.getResourceConfiguration().getSimple(TomcatGroupComponent.CONFIG_ROLES); + report.getResourceConfiguration().remove(TomcatGroupComponent.CONFIG_ROLES); objectName = String.format("Users:type=Group,groupname="%s",database=UserDatabase", name); operation = "createGroup"; } else if (TomcatRoleComponent.RESOURCE_TYPE_NAME.equals(resourceTypeName)) {
commit 1d7cd37a18df200b7ce9205da2565d46a8ea665c Author: jfclere jfclere@neo2.gva.redhat.com Date: Wed Apr 10 15:22:52 2013 +0200
[BZ 921261] WebModule is reported as DOWN or UNAVAILABLE ... from 00e594847fe67da46f8976df58b5d2324d6ebb48
diff --git a/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatWarComponent.java b/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatWarComponent.java index 2a39e25..0a51c45 100644 --- a/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatWarComponent.java +++ b/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatWarComponent.java @@ -370,7 +370,23 @@ public class TomcatWarComponent extends MBeanResourceComponent<TomcatVHostCompon mbeanOperation.invoke(paramValues);
if (!WarOperation.DESTROY.equals(operation)) { - String state = (String) this.webModuleMBean.getAttribute("stateName").refresh(); + String state = null; + try { + // check to see if the mbean is truly active + state = (String) this.webModuleMBean.getAttribute("stateName").refresh(); + } catch (Exception e) { + // if not active an exception may be thrown + state = WarMBeanState.STOPPED; + // try "state" for Tomcat 5.5 + try { + int stateInt = (Integer) this.webModuleMBean.getAttribute("state").refresh(); + if (stateInt == 1) { + state = WarMBeanState.STARTED; + } + } catch (Exception ex) { + // Ignore + } + } String expectedState = getExpectedPostExecutionState(operation); if (!state.equals(expectedState)) { throw new Exception("Failed to " + name + " webapp (value of the 'state' attribute of MBean '"
commit eade99f427f742c2eac5a9e4da0636a52da07cbb Author: jfclere jfclere@neo2.gva.redhat.com Date: Wed Apr 10 15:17:22 2013 +0200
[BZ 921194] Connectors are not properly discovered and therefore are unavailable.. from 00e594847fe67da46f8976df58b5d2324d6ebb48.
diff --git a/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatConnectorDiscoveryComponent.java b/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatConnectorDiscoveryComponent.java index 7a65a73..32566d5 100644 --- a/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatConnectorDiscoveryComponent.java +++ b/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatConnectorDiscoveryComponent.java @@ -132,12 +132,14 @@ public class TomcatConnectorDiscoveryComponent extends MBeanResourceDiscoveryCom if (connectorON != null) { EmsBean connectorBean = connection.getBean(connectorON); EmsAttribute executorNameAttrib = connectorBean.getAttribute("executorName"); - Object executorNameValue = executorNameAttrib.getValue(); - if (executorNameValue != null) { - String executorName = executorNameValue.toString(); - if (!executorName.isEmpty() && !executorName.equalsIgnoreCase("Internal")) { - pluginConfiguration.put(new PropertySimple( - TomcatConnectorComponent.PLUGIN_CONFIG_SHARED_EXECUTOR, executorName)); + if (executorNameAttrib != null) { + Object executorNameValue = executorNameAttrib.getValue(); + if (executorNameValue != null) { + String executorName = executorNameValue.toString(); + if (!executorName.isEmpty() && !executorName.equalsIgnoreCase("Internal")) { + pluginConfiguration.put(new PropertySimple( + TomcatConnectorComponent.PLUGIN_CONFIG_SHARED_EXECUTOR, executorName)); + } } } }
commit b633b97c60a2b50ad292a8ba7d19e2e6cb351d34 Author: jfclere jfclere@neo2.gva.redhat.com Date: Wed Apr 10 15:13:02 2013 +0200
Fix BZ 865460 from 417fbb59817edf64a93d3cca00f2c51926379ab2
Conflicts:
modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatUserDatabaseComponent.java
diff --git a/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatUserDatabaseComponent.java b/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatUserDatabaseComponent.java index ead4956..0a95069 100644 --- a/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatUserDatabaseComponent.java +++ b/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatUserDatabaseComponent.java @@ -51,7 +51,6 @@ public class TomcatUserDatabaseComponent extends MBeanResourceComponent<TomcatSe if (TomcatGroupComponent.RESOURCE_TYPE_NAME.equals(resourceTypeName)) { name = report.getResourceConfiguration().getSimple("groupname").getStringValue(); newRoles = report.getResourceConfiguration().getSimple(TomcatGroupComponent.CONFIG_ROLES); - report.getResourceConfiguration().remove(TomcatGroupComponent.CONFIG_ROLES); objectName = String.format("Users:type=Group,groupname="%s",database=UserDatabase", name); operation = "createGroup"; } else if (TomcatRoleComponent.RESOURCE_TYPE_NAME.equals(resourceTypeName)) {
commit cf3da14deebde03857ad582f0da93763093151a4 Author: jfclere <jfclere@neo2.gva.redhat.com> Date: Wed Apr 10 14:33:38 2013 +0200
fix for BZ: 707349 from e7d48240474fba87f1a3c4118de4618fd2c8b32d.
Conflicts:
modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatConnectorDiscoveryComponent.java modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatWarComponent.java modules/plugins/tomcat/src/main/resources/META-INF/rhq-plugin.xml
diff --git a/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatConnectorDiscoveryComponent.java b/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatConnectorDiscoveryComponent.java index 32566d5..7a65a73 100644 --- a/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatConnectorDiscoveryComponent.java +++ b/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatConnectorDiscoveryComponent.java @@ -132,14 +132,12 @@ public class TomcatConnectorDiscoveryComponent extends MBeanResourceDiscoveryCom if (connectorON != null) { EmsBean connectorBean = connection.getBean(connectorON); EmsAttribute executorNameAttrib = connectorBean.getAttribute("executorName"); - if (executorNameAttrib != null) { - Object executorNameValue = executorNameAttrib.getValue(); - if (executorNameValue != null) { - String executorName = executorNameValue.toString(); - if (!executorName.isEmpty() && !executorName.equalsIgnoreCase("Internal")) { - pluginConfiguration.put(new PropertySimple( - TomcatConnectorComponent.PLUGIN_CONFIG_SHARED_EXECUTOR, executorName)); - } + Object executorNameValue = executorNameAttrib.getValue(); + if (executorNameValue != null) { + String executorName = executorNameValue.toString(); + if (!executorName.isEmpty() && !executorName.equalsIgnoreCase("Internal")) { + pluginConfiguration.put(new PropertySimple( + TomcatConnectorComponent.PLUGIN_CONFIG_SHARED_EXECUTOR, executorName)); } } } diff --git a/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatWarComponent.java b/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatWarComponent.java index 0a51c45..2a39e25 100644 --- a/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatWarComponent.java +++ b/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatWarComponent.java @@ -370,23 +370,7 @@ public class TomcatWarComponent extends MBeanResourceComponent<TomcatVHostCompon 
mbeanOperation.invoke(paramValues);
if (!WarOperation.DESTROY.equals(operation)) { - String state = null; - try { - // check to see if the mbean is truly active - state = (String) this.webModuleMBean.getAttribute("stateName").refresh(); - } catch (Exception e) { - // if not active an exception may be thrown - state = WarMBeanState.STOPPED; - // try "state" for Tomcat 5.5 - try { - int stateInt = (Integer) this.webModuleMBean.getAttribute("state").refresh(); - if (stateInt == 1) { - state = WarMBeanState.STARTED; - } - } catch (Exception ex) { - // Ignore - } - } + String state = (String) this.webModuleMBean.getAttribute("stateName").refresh(); String expectedState = getExpectedPostExecutionState(operation); if (!state.equals(expectedState)) { throw new Exception("Failed to " + name + " webapp (value of the 'state' attribute of MBean '"
commit 0047c728371fd9ce2a9719792e226f4f6d122247 Author: Jirka Kremser <jkremser@redhat.com> Date: Wed Aug 14 19:19:52 2013 +0200
Alert view for a single storage node and its child resources.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java index 861e3fa..35eec41 100644 --- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java +++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java @@ -590,7 +590,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
@Override public Integer[] findResourcesWithAlertDefinitions(StorageNode storageNode) { - List<StorageNode> initialStorageNodes = getStorageNodes(); + List<StorageNode> initialStorageNodes = null; if (storageNode == null) { initialStorageNodes = getStorageNodes(); } else {
commit a10b2e819d6a0ad331c7c8de8b6f2ba79629415e Author: Jay Shaughnessy <jshaughn@redhat.com> Date: Wed Aug 14 11:20:02 2013 -0400
remove entries for stuff still in a branch
diff --git a/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml b/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml index 52770ff..08e793c 100644 --- a/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml +++ b/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml @@ -74,7 +74,7 @@ <difference> <className>org/rhq/enterprise/server/bundle/BundleManagerRemote</className> <differenceType>7012</differenceType> <!-- method added to an interface --> - <method>org.rhq.core.domain.bundle.BundleGroup createBundleGroup(org.rhq.core.domain.auth.Subject, org.rhq.core.domain.bundle.BundleGroup)</method> + <method>org.rhq.core.domain.bundle.BundleGroup createBundleGroup(org.rhq.core.domain.auth.Subject, java.lang.String, java.lang.String)</method> <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> </difference>
@@ -169,11 +169,4 @@ <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> </difference>
- <difference> - <className>org/rhq/enterprise/server/bundle/BundleManagerRemote</className> - <differenceType>7012</differenceType> <!-- method added to an interface --> - <method>org.rhq.core.domain.bundle.BundleGroup updateBundleGroup(org.rhq.core.domain.auth.Subject, org.rhq.core.domain.bundle.BundleGroup)</method> - <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> - </difference> - </differences>
commit 8e8062449a5b1ac7e0814724c2232c018e93022c Author: Jay Shaughnessy <jshaughn@redhat.com> Date: Tue Aug 13 15:01:50 2013 -0400
fix some issues, add some new api methods
diff --git a/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml b/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml index 08e793c..52770ff 100644 --- a/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml +++ b/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml @@ -74,7 +74,7 @@ <difference> <className>org/rhq/enterprise/server/bundle/BundleManagerRemote</className> <differenceType>7012</differenceType> <!-- method added to an interface --> - <method>org.rhq.core.domain.bundle.BundleGroup createBundleGroup(org.rhq.core.domain.auth.Subject, java.lang.String, java.lang.String)</method> + <method>org.rhq.core.domain.bundle.BundleGroup createBundleGroup(org.rhq.core.domain.auth.Subject, org.rhq.core.domain.bundle.BundleGroup)</method> <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> </difference>
@@ -169,4 +169,11 @@ <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> </difference>
+ <difference> + <className>org/rhq/enterprise/server/bundle/BundleManagerRemote</className> + <differenceType>7012</differenceType> <!-- method added to an interface --> + <method>org.rhq.core.domain.bundle.BundleGroup updateBundleGroup(org.rhq.core.domain.auth.Subject, org.rhq.core.domain.bundle.BundleGroup)</method> + <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification> + </difference> + </differences>
rhq-commits@lists.fedorahosted.org