[rhq] modules/enterprise
by John Sanda
modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBeanTest.java | 55 +++++++---
modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java | 7 -
2 files changed, 46 insertions(+), 16 deletions(-)
New commits:
commit dc028de9772aa70f64b7e817c38fd2f9a64d5135
Author: John Sanda <jsanda(a)redhat.com>
Date: Thu Jul 4 07:29:32 2013 -0400
fix failing test
Now that StorageNodeManagerBean creates the storage node resource group, the
group has to be deleted during test runs.
diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBeanTest.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBeanTest.java
index d45ae90..d44d637 100644
--- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBeanTest.java
+++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBeanTest.java
@@ -25,6 +25,10 @@
package org.rhq.enterprise.server.cloud;
+import static org.rhq.enterprise.server.cloud.StorageNodeManagerBean.STORAGE_NODE_GROUP_NAME;
+import static org.rhq.enterprise.server.cloud.StorageNodeManagerBean.STORAGE_NODE_PLUGIN_NAME;
+import static org.rhq.enterprise.server.cloud.StorageNodeManagerBean.STORAGE_NODE_RESOURCE_TYPE_NAME;
+
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
@@ -32,27 +36,30 @@ import java.util.List;
import java.util.Set;
import java.util.UUID;
+import javax.ejb.EJB;
import javax.persistence.Query;
import javax.transaction.Transaction;
import org.testng.Assert;
import org.testng.annotations.Test;
-import org.rhq.core.domain.auth.Subject;
import org.rhq.core.domain.cloud.StorageNode;
import org.rhq.core.domain.configuration.definition.ConfigurationDefinition;
import org.rhq.core.domain.configuration.definition.PropertyDefinitionSimple;
import org.rhq.core.domain.configuration.definition.PropertySimpleType;
+import org.rhq.core.domain.criteria.ResourceGroupCriteria;
import org.rhq.core.domain.criteria.StorageNodeCriteria;
import org.rhq.core.domain.resource.Resource;
import org.rhq.core.domain.resource.ResourceCategory;
import org.rhq.core.domain.resource.ResourceType;
+import org.rhq.core.domain.resource.group.ResourceGroup;
import org.rhq.core.domain.util.PageList;
import org.rhq.core.domain.util.PageOrdering;
+import org.rhq.enterprise.server.auth.SubjectManagerLocal;
import org.rhq.enterprise.server.resource.ResourceTypeManagerLocal;
+import org.rhq.enterprise.server.resource.group.ResourceGroupManagerLocal;
import org.rhq.enterprise.server.test.AbstractEJB3Test;
import org.rhq.enterprise.server.test.TransactionCallback;
-import org.rhq.enterprise.server.util.LookupUtil;
/**
* @author Jirka Kremser
@@ -60,17 +67,19 @@ import org.rhq.enterprise.server.util.LookupUtil;
@Test
public class StorageNodeManagerBeanTest extends AbstractEJB3Test {
+ @EJB
private StorageNodeManagerLocal nodeManager;
+
+ @EJB
private ResourceTypeManagerLocal typeManager;
- private Subject overlord;
- private static final String TEST_PREFIX = "test-";
- @Override
- protected void beforeMethod() throws Exception {
- nodeManager = LookupUtil.getStorageNodeManager();
- typeManager = LookupUtil.getResourceTypeManager();
- overlord = LookupUtil.getSubjectManager().getOverlord();
- }
+ @EJB
+ private ResourceGroupManagerLocal resourceGroupManager;
+
+ @EJB
+ private SubjectManagerLocal subjectManager;
+
+ private static final String TEST_PREFIX = "test-";
@Test
public void testInit() throws Exception {
@@ -79,6 +88,7 @@ public class StorageNodeManagerBeanTest extends AbstractEJB3Test {
try {
prepareScheduler();
+ cleanDatabase();
executeInTransaction(new TransactionCallback() {
@Override
@@ -89,8 +99,6 @@ public class StorageNodeManagerBeanTest extends AbstractEJB3Test {
System.setProperty(cassandraSeedsProperty, addresses.get(0) + "|123|123," + addresses.get(1)
+ "|987|987," + addresses.get(2) + "|123|123");
- cleanDatabase();
-
// create the resource type if it doesn't exist
ResourceType testResourceType = typeManager.getResourceTypeByNameAndPlugin("RHQ Storage Node",
"RHQStorage");
@@ -175,7 +183,8 @@ public class StorageNodeManagerBeanTest extends AbstractEJB3Test {
criteria.addFilterAddress(prefix);
// use DESC just to make sure sorting on name is different than insert order
criteria.addSortAddress(PageOrdering.DESC);
- PageList<StorageNode> list = nodeManager.findStorageNodesByCriteria(overlord, criteria);
+ PageList<StorageNode> list = nodeManager.findStorageNodesByCriteria(subjectManager.getOverlord(),
+ criteria);
assertTrue("The number of found storage nodes should be " + storageNodeCount + ". Was: " + list.size(),
storageNodeCount == list.size());
@@ -205,6 +214,26 @@ public class StorageNodeManagerBeanTest extends AbstractEJB3Test {
// this method is still needed, because tests calls SLSB methods that are executed in their own transaction
// and the rollback performed once the TransactionCallback is finished just wont clean everything
+ // We can only filter on the group name because the resource type info might not exist in the test
+ // database.
+ ResourceGroupCriteria criteria = new ResourceGroupCriteria();
+ criteria.addFilterName(STORAGE_NODE_GROUP_NAME);
+
+ List<ResourceGroup> groups = resourceGroupManager.findResourceGroupsByCriteria(subjectManager.getOverlord(),
+ criteria);
+
+ if (!groups.isEmpty()) {
+ resourceGroupManager.deleteResourceGroup(subjectManager.getOverlord(), groups.get(0).getId());
+ }
+
+// for (ResourceGroup group : groups) {
+// if (group.getName().equals(STORAGE_NODE_GROUP_NAME)) {
+// resourceGroupManager.deleteResourceGroup(subjectManager.getOverlord(), group.getId());
+// break;
+// }
+// }
+
+
// pause the currently running TX
Transaction runningTransaction = getTransactionManager().suspend();
getTransactionManager().begin();
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
index 874be1e..b21946a 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
@@ -97,11 +97,12 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
private static final String SEEDS_PROP = "rhq.cassandra.seeds";
- private static final String STORAGE_NODE_GROUP_NAME = "RHQ Storage Nodes";
+ // The following have package visibility to make accessible to StorageNodeManagerBeanTest
+ static final String STORAGE_NODE_GROUP_NAME = "RHQ Storage Nodes";
- private static final String STORAGE_NODE_RESOURCE_TYPE_NAME = "RHQ Storage Node";
+ static final String STORAGE_NODE_RESOURCE_TYPE_NAME = "RHQ Storage Node";
- private static final String STORAGE_NODE_PLUGIN_NAME = "RHQStorage";
+ static final String STORAGE_NODE_PLUGIN_NAME = "RHQStorage";
@PersistenceContext(unitName = RHQConstants.PERSISTENCE_UNIT_NAME)
private EntityManager entityManager;
10 years, 10 months
[rhq] Branch 'mtho11/d3-multiline-graph' - modules/enterprise
by mike thompson
modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/StackedBarMetricGraphImpl.java | 6
modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3GraphListView.java | 4
modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3MultiLineGraph.java | 105 +++-------
3 files changed, 46 insertions(+), 69 deletions(-)
New commits:
commit 114df45068fad339dc06508633e3aaee2e000e07
Author: Mike Thompson <mithomps(a)redhat.com>
Date: Wed Jul 3 15:33:11 2013 -0700
Second iteration of the new multi-resource graph using d3 instead of nvd3.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/StackedBarMetricGraphImpl.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/StackedBarMetricGraphImpl.java
index f81fb4f..0178f93 100644
--- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/StackedBarMetricGraphImpl.java
+++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/StackedBarMetricGraphImpl.java
@@ -171,7 +171,7 @@ public class StackedBarMetricGraphImpl extends AbstractMetricGraph {
lowBound = determineLowBound(min, peak);
highBound = peak + ((peak - min) * 0.1);
oobMax = $wnd.d3.max(chartContext.data.map(function (d) {
- if (d.baselineMax == undefined) {
+ if (typeof d.baselineMax === 'undefined') {
return 0;
}
else {
@@ -216,7 +216,7 @@ public class StackedBarMetricGraphImpl extends AbstractMetricGraph {
.attr("height", height + margin.top - titleHeight - titleSpace + margin.bottom)
.attr("transform", "translate(" + margin.left + "," + (+titleHeight + titleSpace + margin.top) + ")");
- legendUnDefined = (typeof min === undefined) || (typeof avg === undefined) || (typeof peak === undefined);
+ legendUnDefined = (typeof min === 'undefined') || (typeof avg === 'undefined') || (typeof peak === 'undefined');
if (!useSmallCharts() && !legendUnDefined) {
createMinAvgPeakSidePanel(chartContext.minChartTitle, min, chartContext.avgChartTitle, avg, chartContext.peakChartTitle, peak, chartContext.yAxisUnits);
}
@@ -711,7 +711,7 @@ public class StackedBarMetricGraphImpl extends AbstractMetricGraph {
}; // end public closure
}();
- if(chartContext.data !== undefined && chartContext.data.length > 0){
+ if(typeof chartContext.data !== 'undefined' && chartContext.data.length > 0){
metricStackedBarGraph.draw(chartContext);
}
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3GraphListView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3GraphListView.java
index 1ca63ce..7a4232a 100644
--- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3GraphListView.java
+++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3GraphListView.java
@@ -339,7 +339,7 @@ public abstract class CompositeGroupD3GraphListView extends EnhancedVLayout impl
}
public int getChartHeight() {
- return chartHeight != null ? chartHeight : 210;
+ return chartHeight != null ? chartHeight : 300;
}
public void setChartHeight(Integer chartHeight) {
@@ -369,7 +369,7 @@ public abstract class CompositeGroupD3GraphListView extends EnhancedVLayout impl
for (MultiLineGraphData multiLineGraphData : measurementForEachResource) {
sb.append("{ \"key\": \"");
sb.append(multiLineGraphData.getResourceName());
- sb.append("\",\"values\" : ");
+ sb.append("\",\"value\" : ");
sb.append(produceInnerValuesArray(multiLineGraphData.getMeasurementData()));
sb.append("},");
}
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3MultiLineGraph.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3MultiLineGraph.java
index 9d595a7..be6d110 100644
--- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3MultiLineGraph.java
+++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/CompositeGroupD3MultiLineGraph.java
@@ -92,11 +92,6 @@ public class CompositeGroupD3MultiLineGraph extends CompositeGroupD3GraphListVie
adjustedChartHeight = chartContext.chartHeight - 50,
height = adjustedChartHeight - margin.top - margin.bottom,
titleHeight = 30, titleSpace = 10,
- chartData,
- lowBound,
- min, high,
- newLow = 0,
- highBound,
yScale,
yAxis,
timeScale,
@@ -104,40 +99,21 @@ public class CompositeGroupD3MultiLineGraph extends CompositeGroupD3GraphListVie
chart,
svg;
- // adjust the min scale so blue low line is not in axis
- function determineLowBound(min, peak) {
- //var newLow = min - ((peak - min) * 0.1);
- newLow = min;
- if (newLow < 0) {
- return 0;
- }
- else {
- return newLow;
- }
- }
-
function determineScale() {
var xTicks, xTickSubDivide;
- console.log("DetermineScale!");
+ console.log("DetermineScale for # resources: "+ chartContext.data.length);
if (chartContext.data.length > 0) {
xTicks = 8;
xTickSubDivide = 5;
- chartData = chartContext.data;
- min = $wnd.d3.min(function (d) {
- return d.y;
- });
- high = $wnd.d3.max(function (d) {
- return d.y;
- });
yScale = $wnd.d3.scale.linear()
.clamp(true)
.rangeRound([height, 0])
- .domain([$wnd.d3.min(chartContext.data, function (d) {
+ .domain([$wnd.d3.min(chartContext.data[0], function (d) {
return d.y;
- }), $wnd.d3.max(chartContext.data, function (d) {
+ }), $wnd.d3.max(chartContext.data[0], function (d) {
return d.y;
})]);
@@ -151,7 +127,7 @@ public class CompositeGroupD3MultiLineGraph extends CompositeGroupD3GraphListVie
timeScale = $wnd.d3.time.scale()
.range([0, width])
- .domain($wnd.d3.extent(chartData, function (d) {
+ .domain($wnd.d3.extent(chartContext.data[0], function (d) {
return d.x;
}));
@@ -187,33 +163,33 @@ public class CompositeGroupD3MultiLineGraph extends CompositeGroupD3GraphListVie
}
function createXandYAxes() {
-// var customTimeFormat = timeFormat([
-// [$wnd.d3.time.format("%Y"), function () {
-// return true;
-// }],
-// [$wnd.d3.time.format("%B"), function (d) {
-// return d.getMonth();
-// }],
-// [$wnd.d3.time.format("%b %d"), function (d) {
-// return d.getDate() != 1;
-// }],
-// [$wnd.d3.time.format("%a %d"), function (d) {
-// return d.getDay() && d.getDate() != 1;
-// }],
-// [$wnd.d3.time.format(chartContext.chartXaxisTimeFormatHours), function (d) {
-// return d.getHours();
-// }],
-// [$wnd.d3.time.format(chartContext.chartXaxisTimeFormatHoursMinutes), function (d) {
-// return d.getMinutes();
-// }],
-// [$wnd.d3.time.format(":%S"), function (d) {
-// return d.getSeconds();
-// }],
-// [$wnd.d3.time.format(".%L"), function (d) {
-// return d.getMilliseconds();
-// }]
-// ]);
-// xAxis.tickFormat(customTimeFormat);
+ var customTimeFormat = timeFormat([
+ [$wnd.d3.time.format("%Y"), function () {
+ return true;
+ }],
+ [$wnd.d3.time.format("%B"), function (d) {
+ return d.getMonth();
+ }],
+ [$wnd.d3.time.format("%b %d"), function (d) {
+ return d.getDate() != 1;
+ }],
+ [$wnd.d3.time.format("%a %d"), function (d) {
+ return d.getDay() && d.getDate() != 1;
+ }],
+ [$wnd.d3.time.format(chartContext.chartXaxisTimeFormatHours), function (d) {
+ return d.getHours();
+ }],
+ [$wnd.d3.time.format(chartContext.chartXaxisTimeFormatHoursMinutes), function (d) {
+ return d.getMinutes();
+ }],
+ [$wnd.d3.time.format(":%S"), function (d) {
+ return d.getSeconds();
+ }],
+ [$wnd.d3.time.format(".%L"), function (d) {
+ return d.getMilliseconds();
+ }]
+ ]);
+ xAxis.tickFormat(customTimeFormat);
// create x-axis
svg.append("g")
@@ -269,8 +245,8 @@ public class CompositeGroupD3MultiLineGraph extends CompositeGroupD3GraphListVie
}
- function createAvgLines() {
- var barAvgLine = $wnd.d3.svg.line()
+ function createMultiLines(chartContext) {
+ var graphLine = $wnd.d3.svg.line()
.interpolate("linear")
.x(function (d) {
return timeScale(d.x);
@@ -279,15 +255,16 @@ public class CompositeGroupD3MultiLineGraph extends CompositeGroupD3GraphListVie
return yScale(d.y);
});
- // Bar avg line
- svg.append("path")
- .datum(chartData)
- .attr("class", "barAvgLine")
+ chart.selectAll(".multiLine")
+ .data(chartContext.data)
+ .enter()
+ .append('path')
+ .attr("class", "multiLine")
.attr("fill", "none")
.attr("stroke", "#2e376a")
.attr("stroke-width", "1.5")
- .attr("stroke-opacity", ".7")
- .attr("d", barAvgLine);
+ .attr("stroke-opacity", ".9")
+ .attr("d", function(d) { return graphLine(d.value);});
}
@@ -305,7 +282,7 @@ public class CompositeGroupD3MultiLineGraph extends CompositeGroupD3GraphListVie
createHeader(chartContext.chartTitle);
console.log("created multi-header");
createYAxisGridLines();
- createAvgLines();
+ createMultiLines(chartContext);
createXandYAxes();
}
}
10 years, 10 months
[rhq] 2 commits - modules/common modules/enterprise modules/plugins
by John Sanda
modules/common/cassandra-ccm/cassandra-ccm-arquillian/src/main/java/org/rhq/cassandra/ccm/arquillian/CCMSuiteDeploymentExtension.java | 2
modules/common/cassandra-ccm/cassandra-ccm-maven-plugin/src/main/java/org/rhq/cassandra/ccm/maven/DeployMojo.java | 2
modules/common/cassandra-ccm/cassandra-ccm-testng/src/main/java/org/rhq/cassandra/CCMTestNGListener.java | 2
modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/SchemaManager.java | 6
modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/TopologyManager.java | 34 +-
modules/common/cassandra-schema/src/main/resources/topology/0001.xml | 5
modules/common/cassandra-schema/src/main/resources/topology/0002.xml | 26 -
modules/common/cassandra-schema/src/main/resources/topology/create/0001.xml | 5
modules/common/cassandra-schema/src/main/resources/topology/create/0002.xml | 26 +
modules/common/cassandra-schema/src/main/resources/topology/update/0001.xml | 9
modules/common/cassandra-schema/src/main/resources/topology/update/0002.xml | 26 +
modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/InstallerServiceImpl.java | 2
modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java | 20 -
modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java | 10
modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/scheduler/jobs/StorageNodeMaintenanceJob.java | 164 +++++-----
modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java | 2
16 files changed, 202 insertions(+), 139 deletions(-)
New commits:
commit 95a9f222e3d047fe335fee7c2d305c7ffe18518b
Author: John Sanda <jsanda(a)redhat.com>
Date: Wed Jul 3 14:54:03 2013 -0400
big refactoring of the work that is done when a new storage node is committed to inventory
Here is a run down of the changes.
* fix endpoint comparisons
There was a bug in StorageNodeMaintenanceJob.waitForClustering where it was
comparing the string form of an IP address against a PropertySimple.toString.
It should be comparing the value of the property.
* schedule the addNodeMaintenance op as a group operation
StorageNodeMaintenanceJob now schedules the operations for each storage node as
a group operation instead of as individual operations. This allows us to remove
a lot of code around waiting for operations to complete before scheduling the
next one. More importantly, the previous implementation was not blocking until
each operation completed, which resulted in repair operations running across
multiple nodes simultaneously. We definitely want to run repair on the nodes
serially. Scheduling the work as a group operation handles that for us.
Scheduling the work as a group operation required a slight change to
StorageNodeManagerBean.linkResource. The resource has to be added to the group
before the quartz job is scheduled. Logic needs to be added to verify that the
node has actually joined the cluster before adding it to the group.
* add logic to detect when repair needs to run
There is logic in place now to determine whether or not repair needs to run.
Previously, we would run repair against each node whenever
StorageNodeMaintenanceJob would run. We only want to run repair if and when we
have to since it is a very resource intensive operation.
* Add logic back to update replication_factor of system_auth keyspace
I had previously changed the logic in TopologyManager to *not* update the RF of
the system_auth keyspace. For a multi-node installation, we would increase the
RF of system_auth because the change was made after the rhqadmin user was
created. Without running repair, this results in inconsistent reads, which in
turn leads to failed authentication. When StorageNodeMaintenanceJob runs, we do
want to update the RF of both the system_auth and rhq keyspaces. I have
refactored TopologyManager so that system_auth gets updated.
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-arquillian/src/main/java/org/rhq/cassandra/ccm/arquillian/CCMSuiteDeploymentExtension.java b/modules/common/cassandra-ccm/cassandra-ccm-arquillian/src/main/java/org/rhq/cassandra/ccm/arquillian/CCMSuiteDeploymentExtension.java
index d307f0b..a86c49e 100644
--- a/modules/common/cassandra-ccm/cassandra-ccm-arquillian/src/main/java/org/rhq/cassandra/ccm/arquillian/CCMSuiteDeploymentExtension.java
+++ b/modules/common/cassandra-ccm/cassandra-ccm-arquillian/src/main/java/org/rhq/cassandra/ccm/arquillian/CCMSuiteDeploymentExtension.java
@@ -159,7 +159,7 @@ public class CCMSuiteDeploymentExtension implements LoadableExtension {
try {
schemaManager.install();
clusterInitService.waitForSchemaAgreement(nodes);
- schemaManager.updateTopology();
+ schemaManager.updateTopology(true);
} catch (Exception e) {
if (null != ccm) {
ccm.shutdownCluster();
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-maven-plugin/src/main/java/org/rhq/cassandra/ccm/maven/DeployMojo.java b/modules/common/cassandra-ccm/cassandra-ccm-maven-plugin/src/main/java/org/rhq/cassandra/ccm/maven/DeployMojo.java
index f50535c..b84018f 100644
--- a/modules/common/cassandra-ccm/cassandra-ccm-maven-plugin/src/main/java/org/rhq/cassandra/ccm/maven/DeployMojo.java
+++ b/modules/common/cassandra-ccm/cassandra-ccm-maven-plugin/src/main/java/org/rhq/cassandra/ccm/maven/DeployMojo.java
@@ -74,7 +74,7 @@ public class DeployMojo extends AbstractMojo {
try {
schemaManager.install();
- schemaManager.updateTopology();
+ schemaManager.updateTopology(true);
} catch (Exception e) {
throw new MojoExecutionException("Schema installation failed.", e);
}
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-testng/src/main/java/org/rhq/cassandra/CCMTestNGListener.java b/modules/common/cassandra-ccm/cassandra-ccm-testng/src/main/java/org/rhq/cassandra/CCMTestNGListener.java
index a9292f7..38d5337 100644
--- a/modules/common/cassandra-ccm/cassandra-ccm-testng/src/main/java/org/rhq/cassandra/CCMTestNGListener.java
+++ b/modules/common/cassandra-ccm/cassandra-ccm-testng/src/main/java/org/rhq/cassandra/CCMTestNGListener.java
@@ -129,7 +129,7 @@ public class CCMTestNGListener implements IInvokedMethodListener {
if (annotation.waitForSchemaAgreement()) {
clusterInitService.waitForSchemaAgreement(nodes);
}
- schemaManager.updateTopology();
+ schemaManager.updateTopology(true);
}
private void shutdownCluster() throws Exception {
diff --git a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/SchemaManager.java b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/SchemaManager.java
index 8f8c47e..8f67ab3 100644
--- a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/SchemaManager.java
+++ b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/SchemaManager.java
@@ -93,9 +93,9 @@ public class SchemaManager {
version.drop();
}
- public boolean updateTopology() throws Exception {
+ public boolean updateTopology(boolean isNewSchema) throws Exception {
TopologyManager topology = new TopologyManager(username, password, nodes);
- return topology.updateTopology();
+ return topology.updateTopology(isNewSchema);
}
private static List<StorageNode> parseNodeInformation(String... nodes) {
@@ -139,7 +139,7 @@ public class SchemaManager {
} else if ("drop".equalsIgnoreCase(command)) {
schemaManager.drop();
} else if ("topology".equalsIgnoreCase(command)) {
- schemaManager.updateTopology();
+ schemaManager.updateTopology(true);
} else {
throw new IllegalArgumentException(command + " not available.");
}
diff --git a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/TopologyManager.java b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/TopologyManager.java
index 850c383..fd987a1 100644
--- a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/TopologyManager.java
+++ b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/TopologyManager.java
@@ -55,8 +55,12 @@ public class TopologyManager extends AbstractManager {
this.file = file;
}
- protected String getFile() {
- return TOPOLOGY_BASE_FOLDER + "/" + this.file;
+ protected String getFile(boolean isNewSchema) {
+ if (isNewSchema) {
+ return TOPOLOGY_BASE_FOLDER + "/create/" + this.file;
+ }
+
+ return TOPOLOGY_BASE_FOLDER + "/update/" + this.file;
}
}
@@ -64,14 +68,14 @@ public class TopologyManager extends AbstractManager {
super(username, password, nodes);
}
- public boolean updateTopology() throws Exception {
+ public boolean updateTopology(boolean isNewSchema) throws Exception {
boolean result = false;
initCluster();
if (schemaExists()) {
log.info("Applying topology updates...");
- result = this.updateReplicationFactor(nodes.size());
- this.updateGCGrace(nodes.size());
+ result = this.updateReplicationFactor(isNewSchema, nodes.size());
+ this.updateGCGrace(isNewSchema, nodes.size());
} else {
log.info("Topology updates cannot be applied because the schema is not installed.");
}
@@ -80,7 +84,7 @@ public class TopologyManager extends AbstractManager {
return result;
}
- private boolean updateReplicationFactor(int numberOfNodes) throws Exception {
+ private boolean updateReplicationFactor(boolean isNewSchema, int numberOfNodes) throws Exception {
log.info("Starting to execute " + Task.UpdateReplicationFactor + " task.");
int replicationFactor = 1;
@@ -97,19 +101,19 @@ public class TopologyManager extends AbstractManager {
return false;
}
- log.info("Applying file " + Task.UpdateReplicationFactor.getFile() + " for " + Task.UpdateReplicationFactor
- + " task.");
- for (String query : this.getSteps(Task.UpdateReplicationFactor.getFile())) {
+ log.info("Applying file " + Task.UpdateReplicationFactor.getFile(isNewSchema) + " for " +
+ Task.UpdateReplicationFactor + " task.");
+ for (String query : this.getSteps(Task.UpdateReplicationFactor.getFile(isNewSchema))) {
executedPreparedStatement(query, replicationFactor);
}
- log.info("File " + Task.UpdateReplicationFactor.getFile() + " applied for " + Task.UpdateReplicationFactor
- + " task.");
+ log.info("File " + Task.UpdateReplicationFactor.getFile(isNewSchema) + " applied for " +
+ Task.UpdateReplicationFactor + " task.");
log.info("Successfully executed " + Task.UpdateReplicationFactor + " task.");
return true;
}
- private boolean updateGCGrace(int numberOfNodes) throws Exception {
+ private boolean updateGCGrace(boolean isNewSchema, int numberOfNodes) throws Exception {
log.info("Starting to execute " + Task.UpdateGCGrace + " task.");
int gcGraceSeconds = 864000;
@@ -120,11 +124,11 @@ public class TopologyManager extends AbstractManager {
}
- log.info("Applying file " + Task.UpdateGCGrace.getFile() + " for " + Task.UpdateGCGrace + " task.");
- for (String query : this.getSteps(Task.UpdateGCGrace.getFile())) {
+ log.info("Applying file " + Task.UpdateGCGrace.getFile(isNewSchema) + " for " + Task.UpdateGCGrace + " task.");
+ for (String query : this.getSteps(Task.UpdateGCGrace.getFile(isNewSchema))) {
executedPreparedStatement(query, gcGraceSeconds);
}
- log.info("File " + Task.UpdateGCGrace.getFile() + " applied for " + Task.UpdateGCGrace + " task.");
+ log.info("File " + Task.UpdateGCGrace.getFile(isNewSchema) + " applied for " + Task.UpdateGCGrace + " task.");
log.info("Successfully executed " + Task.UpdateGCGrace + " task.");
return true;
diff --git a/modules/common/cassandra-schema/src/main/resources/topology/0001.xml b/modules/common/cassandra-schema/src/main/resources/topology/0001.xml
deleted file mode 100644
index 5cbd7eb..0000000
--- a/modules/common/cassandra-schema/src/main/resources/topology/0001.xml
+++ /dev/null
@@ -1,5 +0,0 @@
-<updatePlan>
- <step>
- ALTER KEYSPACE rhq WITH replication = {'class': 'SimpleStrategy', 'replication_factor': %s};
- </step>
-</updatePlan>
\ No newline at end of file
diff --git a/modules/common/cassandra-schema/src/main/resources/topology/0002.xml b/modules/common/cassandra-schema/src/main/resources/topology/0002.xml
deleted file mode 100644
index d631030..0000000
--- a/modules/common/cassandra-schema/src/main/resources/topology/0002.xml
+++ /dev/null
@@ -1,26 +0,0 @@
-<updatePlan>
- <step>
- ALTER COLUMNFAMILY rhq.metrics_index WITH gc_grace_seconds = %s;
- </step>
-
- <step>
- ALTER COLUMNFAMILY rhq.raw_metrics WITH gc_grace_seconds = %s;
- </step>
-
- <step>
- ALTER COLUMNFAMILY rhq.one_hour_metrics WITH gc_grace_seconds = %s;
- </step>
-
- <step>
- ALTER COLUMNFAMILY rhq.six_hour_metrics WITH gc_grace_seconds = %s;
- </step>
-
- <step>
- ALTER COLUMNFAMILY rhq.twenty_four_hour_metrics WITH gc_grace_seconds = %s;
- </step>
-
- <step>
- ALTER COLUMNFAMILY rhq.schema_version WITH gc_grace_seconds = %s;
- </step>
-
-</updatePlan>
\ No newline at end of file
diff --git a/modules/common/cassandra-schema/src/main/resources/topology/create/0001.xml b/modules/common/cassandra-schema/src/main/resources/topology/create/0001.xml
new file mode 100644
index 0000000..5cbd7eb
--- /dev/null
+++ b/modules/common/cassandra-schema/src/main/resources/topology/create/0001.xml
@@ -0,0 +1,5 @@
+<updatePlan>
+ <step>
+ ALTER KEYSPACE rhq WITH replication = {'class': 'SimpleStrategy', 'replication_factor': %s};
+ </step>
+</updatePlan>
\ No newline at end of file
diff --git a/modules/common/cassandra-schema/src/main/resources/topology/create/0002.xml b/modules/common/cassandra-schema/src/main/resources/topology/create/0002.xml
new file mode 100644
index 0000000..d631030
--- /dev/null
+++ b/modules/common/cassandra-schema/src/main/resources/topology/create/0002.xml
@@ -0,0 +1,26 @@
+<updatePlan>
+ <step>
+ ALTER COLUMNFAMILY rhq.metrics_index WITH gc_grace_seconds = %s;
+ </step>
+
+ <step>
+ ALTER COLUMNFAMILY rhq.raw_metrics WITH gc_grace_seconds = %s;
+ </step>
+
+ <step>
+ ALTER COLUMNFAMILY rhq.one_hour_metrics WITH gc_grace_seconds = %s;
+ </step>
+
+ <step>
+ ALTER COLUMNFAMILY rhq.six_hour_metrics WITH gc_grace_seconds = %s;
+ </step>
+
+ <step>
+ ALTER COLUMNFAMILY rhq.twenty_four_hour_metrics WITH gc_grace_seconds = %s;
+ </step>
+
+ <step>
+ ALTER COLUMNFAMILY rhq.schema_version WITH gc_grace_seconds = %s;
+ </step>
+
+</updatePlan>
\ No newline at end of file
diff --git a/modules/common/cassandra-schema/src/main/resources/topology/update/0001.xml b/modules/common/cassandra-schema/src/main/resources/topology/update/0001.xml
new file mode 100644
index 0000000..f2c0e57
--- /dev/null
+++ b/modules/common/cassandra-schema/src/main/resources/topology/update/0001.xml
@@ -0,0 +1,9 @@
+<updatePlan>
+ <step>
+ ALTER KEYSPACE system_auth WITH replication = {'class': 'SimpleStrategy', 'replication_factor': %s};
+ </step>
+
+ <step>
+ ALTER KEYSPACE rhq WITH replication = {'class': 'SimpleStrategy', 'replication_factor': %s};
+ </step>
+</updatePlan>
\ No newline at end of file
diff --git a/modules/common/cassandra-schema/src/main/resources/topology/update/0002.xml b/modules/common/cassandra-schema/src/main/resources/topology/update/0002.xml
new file mode 100644
index 0000000..d631030
--- /dev/null
+++ b/modules/common/cassandra-schema/src/main/resources/topology/update/0002.xml
@@ -0,0 +1,26 @@
+<updatePlan>
+ <step>
+ ALTER COLUMNFAMILY rhq.metrics_index WITH gc_grace_seconds = %s;
+ </step>
+
+ <step>
+ ALTER COLUMNFAMILY rhq.raw_metrics WITH gc_grace_seconds = %s;
+ </step>
+
+ <step>
+ ALTER COLUMNFAMILY rhq.one_hour_metrics WITH gc_grace_seconds = %s;
+ </step>
+
+ <step>
+ ALTER COLUMNFAMILY rhq.six_hour_metrics WITH gc_grace_seconds = %s;
+ </step>
+
+ <step>
+ ALTER COLUMNFAMILY rhq.twenty_four_hour_metrics WITH gc_grace_seconds = %s;
+ </step>
+
+ <step>
+ ALTER COLUMNFAMILY rhq.schema_version WITH gc_grace_seconds = %s;
+ </step>
+
+</updatePlan>
\ No newline at end of file
diff --git a/modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/InstallerServiceImpl.java b/modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/InstallerServiceImpl.java
index fbd869b..4c87f70 100644
--- a/modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/InstallerServiceImpl.java
+++ b/modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/InstallerServiceImpl.java
@@ -487,7 +487,7 @@ public class InstallerServiceImpl implements InstallerService {
}
log("Install RHQ schema along with updates to Cassandra.");
storageNodeSchemaManager.install();
- storageNodeSchemaManager.updateTopology();
+ storageNodeSchemaManager.updateTopology(true);
} else {
log("Ignoring Cassandra schema - installer will assume it exists and is already up-to-date.");
}
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
index 90d9497..874be1e 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
@@ -39,6 +39,7 @@ import javax.persistence.TypedQuery;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.quartz.JobDataMap;
import org.quartz.SimpleTrigger;
import org.quartz.Trigger;
@@ -200,7 +201,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
this.updateStorageNodes(storageNodeMap);
if (clusterMaintenanceNeeded) {
- this.scheduleQuartzJob();
+ this.scheduleQuartzJob(existingStorageNodes.size());
}
return new ArrayList<StorageNode>(storageNodeMap.values());
@@ -214,6 +215,10 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
String configAddress = resourceConfig.getSimpleValue(RHQ_STORAGE_ADDRESS_PROPERTY);
if (configAddress != null) {
+ // TODO Do not add the node to the group until we have verified it has joined the cluster
+ // StorageNodeMaintenanceJob currently determines if a new node has successfully joined the cluster.
+ addStorageNodeToGroup(resource);
+
boolean storageNodeFound = false;
if (storageNodes != null) {
for (StorageNode storageNode : storageNodes) {
@@ -239,10 +244,8 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
entityManager.persist(storageNode);
- scheduleQuartzJob();
+ scheduleQuartzJob(storageNodes.size());
}
-
- addStorageNodeToGroup(resource);
}
}
@@ -306,13 +309,14 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
* @return The storage node resource group.
* @throws IllegalStateException if the group is not found or does not exist.
*/
- private ResourceGroup getStorageNodeGroup() {
+ public ResourceGroup getStorageNodeGroup() {
Subject overlord = subjectManager.getOverlord();
ResourceGroupCriteria criteria = new ResourceGroupCriteria();
criteria.addFilterResourceTypeName(STORAGE_NODE_RESOURCE_TYPE_NAME);
criteria.addFilterPluginName(STORAGE_NODE_PLUGIN_NAME);
criteria.addFilterName(STORAGE_NODE_GROUP_NAME);
+ criteria.fetchExplicitResources(true);
List<ResourceGroup> groups = resourceGroupManager.findResourceGroupsByCriteria(overlord, criteria);
@@ -472,7 +476,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
return newNodes;
}
- private void scheduleQuartzJob() {
+ private void scheduleQuartzJob(int clusterSize) {
String jobName = StorageNodeMaintenanceJob.class.getName();
String jobGroupName = StorageNodeMaintenanceJob.class.getName();
String triggerName = StorageNodeMaintenanceJob.class.getName();
@@ -482,6 +486,10 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
trigger.setJobName(jobName);
trigger.setJobGroup(jobGroupName);
try {
+ JobDataMap jobDataMap = new JobDataMap();
+ jobDataMap.put(StorageNodeMaintenanceJob.JOB_DATA_PROPERTY_CLUSTER_SIZE, Integer.toString(clusterSize));
+ trigger.setJobDataMap(jobDataMap);
+
quartzScheduler.scheduleJob(trigger);
} catch (Throwable t) {
log.warn("Unable to schedule storage node maintenance job", t);
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java
index 54646ec..52e2424 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java
@@ -27,6 +27,7 @@ import org.rhq.core.domain.cloud.StorageNode;
import org.rhq.core.domain.cloud.StorageNodeLoadComposite;
import org.rhq.core.domain.criteria.StorageNodeCriteria;
import org.rhq.core.domain.resource.Resource;
+import org.rhq.core.domain.resource.group.ResourceGroup;
import org.rhq.core.domain.util.PageList;
@Local
@@ -86,4 +87,13 @@ public interface StorageNodeManagerLocal {
*/
void runReadRepair();
+ /**
+ * This method assumes the storage node resource group already exists; as such, it should only be called from places
+ * in the code that are after the point(s) where the group has been created.
+ *
+ * @return The storage node resource group.
+ * @throws IllegalStateException if the group is not found or does not exist.
+ */
+ ResourceGroup getStorageNodeGroup();
+
}
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/scheduler/jobs/StorageNodeMaintenanceJob.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/scheduler/jobs/StorageNodeMaintenanceJob.java
index f492fa0..6b1940d 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/scheduler/jobs/StorageNodeMaintenanceJob.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/scheduler/jobs/StorageNodeMaintenanceJob.java
@@ -24,6 +24,7 @@ import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.quartz.JobDataMap;
import org.quartz.JobExecutionContext;
import org.quartz.JobExecutionException;
@@ -36,13 +37,11 @@ import org.rhq.core.domain.configuration.PropertyList;
import org.rhq.core.domain.configuration.PropertyMap;
import org.rhq.core.domain.configuration.PropertySimple;
import org.rhq.core.domain.criteria.ResourceCriteria;
-import org.rhq.core.domain.operation.OperationRequestStatus;
-import org.rhq.core.domain.operation.ResourceOperationHistory;
-import org.rhq.core.domain.operation.bean.ResourceOperationSchedule;
+import org.rhq.core.domain.operation.bean.GroupOperationSchedule;
import org.rhq.core.domain.resource.Resource;
-import org.rhq.core.domain.util.PageControl;
-import org.rhq.core.domain.util.PageList;
+import org.rhq.core.domain.resource.group.ResourceGroup;
import org.rhq.core.util.StringUtil;
+import org.rhq.enterprise.server.auth.SubjectManagerLocal;
import org.rhq.enterprise.server.cloud.StorageNodeManagerLocal;
import org.rhq.enterprise.server.operation.OperationManagerLocal;
import org.rhq.enterprise.server.util.LookupUtil;
@@ -58,6 +57,10 @@ public class StorageNodeMaintenanceJob extends AbstractStatefulJob {
private final Log log = LogFactory.getLog(StorageNodeMaintenanceJob.class);
+ public static final String JOB_DATA_PROPERTY_CLUSTER_SIZE = "clusterSize";
+
+ public static final String JOB_DATA_PROPERTY_TOPOLOGY_CHANGED = "topologyChanged";
+
private final static int MAX_ITERATIONS = 5;
private final static int TIMEOUT = 10000;
private final static String STORAGE_SERVICE = "Storage Service";
@@ -73,7 +76,10 @@ public class StorageNodeMaintenanceJob extends AbstractStatefulJob {
private static final String PASSWORD_PROP = "rhq.cassandra.password";
@Override
- public void executeJobCode(JobExecutionContext arg0) throws JobExecutionException {
+ public void executeJobCode(JobExecutionContext context) throws JobExecutionException {
+ JobDataMap jobDataMap = context.getMergedJobDataMap();
+ int clusterSize = Integer.parseInt(jobDataMap.getString(JOB_DATA_PROPERTY_CLUSTER_SIZE));
+
//1. Wait for resources to be linked to storage nodes
waitForResouceLinks();
@@ -84,21 +90,50 @@ public class StorageNodeMaintenanceJob extends AbstractStatefulJob {
//3. Wait for all storage nodes to be part of the same cluster
storageNodes = waitForClustering(storageNodes);
- //4. Update topology
- boolean topologyUpdated = updateTopology(storageNodes);
-
- //5. Run repair operation on all the storage nodes if topology(replication factor was updated)
- if (topologyUpdated) {
- List<String> seedList = new ArrayList<String>();
- for (StorageNode storageNode : storageNodes) {
- seedList.add(storageNode.getAddress());
+ boolean isReadRepairNeeded;
+
+ if (clusterSize >= 4) {
+ // At 4 nodes we increase the RF to 3. We are not increasing the RF beyond
+ // that for additional nodes; so, there is no need to run repair if we are
+ // expanding from a 4 node cluster since the RF remains the same.
+ isReadRepairNeeded = false;
+ } else if (clusterSize == 1) {
+ // The RF will increase since we are going from a single to a multi-node
+ // cluster; therefore, we want to run repair.
+ isReadRepairNeeded = true;
+ } else if (clusterSize == 2) {
+ if (storageNodes.size() > 3) {
+ // If we go from 2 to > 3 nodes we will increase the RF to 3; therefore
+ // we want to run repair.
+ isReadRepairNeeded = true;
+ } else {
+ // If we go from 2 to 3 nodes, we keep the RF at 2 so there is no need
+ // to run repair.
+ isReadRepairNeeded = false;
}
+ } else if (clusterSize == 3) {
+ // We are increasing the cluster size > 3 which means the RF will be
+ // updated to 3; therefore, we want to run repair.
+ isReadRepairNeeded = true;
+ } else {
+ // If the cluster size is zero, then something is really screwed up. It
+ // should always be > 0.
+ log.error("The job data property [" + JOB_DATA_PROPERTY_CLUSTER_SIZE + "] should always be greater " +
+ "than zero. This may be a bug in the code that scheduled this job.");
+ isReadRepairNeeded = storageNodes.size() > 1;
+ }
- for (StorageNode storageNode : storageNodes) {
- Resource resource = storageNode.getResource();
- runNodeMaintenance(resource, seedList);
- }
+ if (isReadRepairNeeded) {
+ updateTopology(storageNodes);
+ }
+
+ //5. run maintenance on each node
+ List<String> seedList = new ArrayList<String>();
+ for (StorageNode storageNode : storageNodes) {
+ seedList.add(storageNode.getAddress());
}
+
+ runNodeMaintenance(seedList, isReadRepairNeeded);
}
private boolean updateTopology(List<StorageNode> storageNodes) throws JobExecutionException {
@@ -106,9 +141,9 @@ public class StorageNodeMaintenanceJob extends AbstractStatefulJob {
String password = getRequiredStorageProperty(PASSWORD_PROP);
SchemaManager schemaManager = new SchemaManager(username, password, storageNodes);
try{
- return schemaManager.updateTopology();
+ return schemaManager.updateTopology(false);
} catch (Exception e) {
- log.error(e);
+ log.error("An error occurred while applying schema topology changes", e);
}
return false;
@@ -147,7 +182,7 @@ public class StorageNodeMaintenanceJob extends AbstractStatefulJob {
List<Property> actualList = propertyList.getList();
for (Property property : actualList) {
PropertyMap map = (PropertyMap) property;
- endpoints.add(map.get(ENDPOINT_PROPERTY).toString());
+ endpoints.add(map.getSimpleValue(ENDPOINT_PROPERTY, null));
}
} catch (Exception e) {
log.error("Error fetching live configuration for resource " + resource.getId());
@@ -157,7 +192,7 @@ public class StorageNodeMaintenanceJob extends AbstractStatefulJob {
}
}
} catch (Exception e) {
- log.error(e);
+ log.error("An exception occurred while waiting for nodes to cluster", e);
}
Collections.sort(endpoints);
@@ -184,67 +219,38 @@ public class StorageNodeMaintenanceJob extends AbstractStatefulJob {
return storageNodes;
}
- private void runNodeMaintenance(Resource resource, List<String> seedList) {
+ private void runNodeMaintenance(List<String> seedList, boolean runRepair) {
OperationManagerLocal operationManager = LookupUtil.getOperationManager();
+ StorageNodeManagerLocal storageNodeManager = LookupUtil.getStorageNodeManager();
+ SubjectManagerLocal subjectManager = LookupUtil.getSubjectManager();
+
+ ResourceGroup storageNodeGroup = storageNodeManager.getStorageNodeGroup();
+
+ GroupOperationSchedule schedule = new GroupOperationSchedule();
+ schedule.setGroup(storageNodeGroup);
+ schedule.setHaltOnFailure(false);
+ schedule.setExecutionOrder(new ArrayList<Resource>(storageNodeGroup.getExplicitResources()));
+ schedule.setJobTrigger(JobTrigger.createNowTrigger());
+ schedule.setSubject(subjectManager.getOverlord());
+ schedule.setOperationName(MAINTENANCE_OPERATION);
+ schedule.setDescription(MAINTENANCE_OPERATION_NOTE);
+
+ List<Property> properties = new ArrayList<Property>();
+ properties.add(new PropertySimple(RUN_REPAIR_PROPERTY, runRepair));
+ properties.add(new PropertySimple(UPDATE_SEEDS_LIST, Boolean.TRUE));
+
+ PropertyList seedListProperty = new PropertyList(SEEDS_LIST);
+ for (String seed : seedList) {
+ seedListProperty.add(new PropertySimple("seed", seed));
+ }
+ properties.add(seedListProperty);
- try {
- ResourceOperationSchedule newSchedule = new ResourceOperationSchedule();
- newSchedule.setJobTrigger(JobTrigger.createNowTrigger());
- newSchedule.setResource(resource);
- newSchedule.setOperationName(MAINTENANCE_OPERATION);
- newSchedule.setDescription(MAINTENANCE_OPERATION_NOTE);
-
- List<Property> properties = new ArrayList<Property>();
- properties.add(new PropertySimple(RUN_REPAIR_PROPERTY, Boolean.TRUE));
- properties.add(new PropertySimple(UPDATE_SEEDS_LIST, Boolean.TRUE));
-
- PropertyList seedListProperty = new PropertyList(SEEDS_LIST);
- for (String seed : seedList) {
- seedListProperty.add(new PropertySimple("seed", seed));
- }
- properties.add(seedListProperty);
-
- Configuration config = new Configuration();
- config.setProperties(properties);
- newSchedule.setParameters(config);
-
- long operationStartTime = System.currentTimeMillis();
- operationManager.scheduleResourceOperation(LookupUtil.getSubjectManager().getOverlord(), newSchedule);
-
- int iteration = 0;
- boolean resultFound = false;
- while (iteration < MAX_ITERATIONS && !resultFound) {
- PageList<ResourceOperationHistory> results = operationManager.findCompletedResourceOperationHistories(
- LookupUtil.getSubjectManager().getOverlord(), resource.getId(), operationStartTime, null,
- PageControl.getUnlimitedInstance());
-
- for (ResourceOperationHistory operationHistory : results) {
- if (MAINTENANCE_OPERATION.equals(operationHistory.getOperationDefinition().getName())) {
- if (OperationRequestStatus.SUCCESS.equals(operationHistory.getStatus())) {
- Configuration operationResults = operationHistory.getResults();
- if ("true".equals(operationResults.getSimpleValue(SUCCEED_PROPERTY))) {
- resultFound = true;
- }
- }
- }
- }
-
- if (resultFound) {
- break;
- } else {
- try {
- Thread.sleep(TIMEOUT);
- } catch (Exception e) {
- log.error(e);
- }
- }
+ Configuration config = new Configuration();
+ config.setProperties(properties);
- iteration++;
- }
+ schedule.setParameters(config);
- } catch (Exception e) {
- log.error(e);
- }
+ operationManager.scheduleGroupOperation(subjectManager.getOverlord(), schedule);
}
private List<StorageNode> getOnlyResourceLinkedStorageNodes() {
commit a39e88e38e73d07f7ee7ac6374c78b566ea9c5b4
Author: John Sanda <jsanda(a)redhat.com>
Date: Tue Jul 2 17:01:15 2013 -0400
use ProcessInfo.freshSnapshot during avail checks to avoid stale data
diff --git a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java
index 7d06cb2..93d758c 100644
--- a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java
+++ b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java
@@ -135,7 +135,7 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent
return UNKNOWN;
} else {
// It is safe to read prior snapshot as getNativeProcess always return a fresh instance
- ProcessInfoSnapshot processInfoSnaphot = processInfo.priorSnaphot();
+ ProcessInfoSnapshot processInfoSnaphot = processInfo.freshSnapshot();
if (processInfoSnaphot.isRunning()) {
return UP;
} else {
10 years, 10 months
[rhq] modules/plugins
by Thomas Segismont
modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatServerComponent.java | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
New commits:
commit ace32579675ccf0f02dce110d669b57261a70afb
Author: Thomas Segismont <tsegismo(a)redhat.com>
Date: Wed Jul 3 17:05:01 2013 +0200
Revert "Bug 863502 - EMS ConnectionFactory.discoverServerClasses can throw OutOfMemoryError"
This reverts commit faf0013f511894eb2c7b9ef14f87b5e87d657154.
diff --git a/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatServerComponent.java b/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatServerComponent.java
index c289069..f1e2c51 100644
--- a/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatServerComponent.java
+++ b/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatServerComponent.java
@@ -195,11 +195,10 @@ public class TomcatServerComponent<T extends ResourceComponent<?>> implements JM
// to have a version compatible local install and set the install path to the local path, even though
// the server url was remote.
String catalinaHome = pluginConfig.getSimpleValue(PLUGIN_CONFIG_CATALINA_HOME_PATH, null);
- boolean hasLocalJars = catalinaHome != null && new File(catalinaHome, "lib").isDirectory();
+ boolean hasLocalJars = new File(catalinaHome).isDirectory();
if (hasLocalJars) {
- String libDir = new File(catalinaHome, "lib").getAbsolutePath();
- connectionSettings.setLibraryURI(libDir);
+ connectionSettings.setLibraryURI(catalinaHome);
connectionFactory.discoverServerClasses(connectionSettings);
// Tell EMS to make copies of jar files so that the ems classloader doesn't lock
10 years, 10 months
[rhq] modules/plugins
by Thomas Segismont
modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatServerComponent.java | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
New commits:
commit faf0013f511894eb2c7b9ef14f87b5e87d657154
Author: Thomas Segismont <tsegismo(a)redhat.com>
Date: Wed Jul 3 14:01:00 2013 +0200
Bug 863502 - EMS ConnectionFactory.discoverServerClasses can throw OutOfMemoryError
Changed the libraryURI root: the plugin was looking for catalina jars from the CATALINA_HOME directory, instead of CATALINA_HOME/lib.
Now users can put any number of files in CATALINA_HOME, provided they're outside the lib directory.
diff --git a/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatServerComponent.java b/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatServerComponent.java
index f1e2c51..c289069 100644
--- a/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatServerComponent.java
+++ b/modules/plugins/tomcat/src/main/java/org/jboss/on/plugins/tomcat/TomcatServerComponent.java
@@ -195,10 +195,11 @@ public class TomcatServerComponent<T extends ResourceComponent<?>> implements JM
// to have a version compatible local install and set the install path to the local path, even though
// the server url was remote.
String catalinaHome = pluginConfig.getSimpleValue(PLUGIN_CONFIG_CATALINA_HOME_PATH, null);
- boolean hasLocalJars = new File(catalinaHome).isDirectory();
+ boolean hasLocalJars = catalinaHome != null && new File(catalinaHome, "lib").isDirectory();
if (hasLocalJars) {
- connectionSettings.setLibraryURI(catalinaHome);
+ String libDir = new File(catalinaHome, "lib").getAbsolutePath();
+ connectionSettings.setLibraryURI(libDir);
connectionFactory.discoverServerClasses(connectionSettings);
// Tell EMS to make copies of jar files so that the ems classloader doesn't lock
10 years, 10 months
[rhq] Changes to 'bug/923400'
by Thomas Segismont
New branch 'bug/923400' available with the following commits:
commit d9135011a516e746a287a2ca3d3cffc16c7a6c9d
Author: Thomas Segismont <tsegismo(a)redhat.com>
Date: Wed Jul 3 12:27:55 2013 +0200
Bug 923400 - Sigar creates high number of blocked threads (unbounded) if mount is gone
10 years, 10 months
[rhq] Changes to 'mtho11/d3-multiline-graph'
by mike thompson
New branch 'mtho11/d3-multiline-graph' available with the following commits:
commit 638b20247ed0727930e7f0ddebac07bef7e6cc79
Author: Mike Thompson <mithomps(a)redhat.com>
Date: Tue Jul 2 14:33:21 2013 -0700
First pass at new Multi-resource graph using d3 instead of nvd3.
10 years, 10 months
[rhq] modules/enterprise
by mike thompson
modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MeasurementTableView.java | 2 ++
1 file changed, 2 insertions(+)
New commits:
commit 6b238ccea5445907414fc1c087a832df01b97673
Author: Mike Thompson <mithomps(a)redhat.com>
Date: Tue Jul 2 11:13:23 2013 -0700
Put back in Date Range control on MeasurementTableView until consolidated metrics tab is ready.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MeasurementTableView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MeasurementTableView.java
index 1d456f4..7022648 100644
--- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MeasurementTableView.java
+++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MeasurementTableView.java
@@ -42,6 +42,7 @@ import org.rhq.core.domain.measurement.MeasurementUnits;
import org.rhq.core.domain.resource.composite.ResourceComposite;
import org.rhq.core.domain.util.PageList;
import org.rhq.enterprise.gui.coregui.client.CoreGUI;
+import org.rhq.enterprise.gui.coregui.client.components.measurement.UserPreferencesMeasurementRangeEditor;
import org.rhq.enterprise.gui.coregui.client.components.table.Table;
import org.rhq.enterprise.gui.coregui.client.components.table.TableAction;
import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup;
@@ -69,6 +70,7 @@ public class MeasurementTableView extends Table<MetricsTableDataSource> {
protected void configureTable() {
ArrayList<ListGridField> fields = getDataSource().getListGridFields();
setListGridFields(fields.toArray(new ListGridField[0]));
+ addExtraWidget(new UserPreferencesMeasurementRangeEditor(), true);
addTableAction(MSG.view_measureTable_getLive(), new TableAction() {
@Override
public boolean isEnabled(ListGridRecord[] selection) {
10 years, 10 months
[rhq] modules/enterprise
by mazz
modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/install/remote/SSHFileSend.java | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
New commits:
commit 70db4e046ee011b5766d5bce6d1890f4f2d8391f
Author: John Mazzitelli <mazz(a)redhat.com>
Date: Tue Jul 2 13:48:30 2013 -0400
BZ 976227 - strip the file name from a Windows path
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/install/remote/SSHFileSend.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/install/remote/SSHFileSend.java
index ece76af..7eb513a 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/install/remote/SSHFileSend.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/install/remote/SSHFileSend.java
@@ -22,16 +22,16 @@
*/
package org.rhq.enterprise.server.install.remote;
-import com.jcraft.jsch.Channel;
-import com.jcraft.jsch.ChannelExec;
-import com.jcraft.jsch.Session;
-
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
+import com.jcraft.jsch.Channel;
+import com.jcraft.jsch.ChannelExec;
+import com.jcraft.jsch.Session;
+
/**
* @author Greg Hinkle
*/
@@ -61,6 +61,8 @@ public class SSHFileSend {
command = "C0644 " + filesize + " ";
if (sourceFilename.lastIndexOf('/') > 0) {
command += sourceFilename.substring(sourceFilename.lastIndexOf('/') + 1);
+ } else if (sourceFilename.lastIndexOf('\\') > 0) {
+ command += sourceFilename.substring(sourceFilename.lastIndexOf('\\') + 1);
} else {
command += sourceFilename;
}
10 years, 10 months
[rhq] modules/core
by Thomas Segismont
modules/core/plugin-container/src/test/java/org/rhq/core/pc/inventory/ResourceContainerTest.java | 33 ++++++----
1 file changed, 20 insertions(+), 13 deletions(-)
New commits:
commit 2f9a152ab91427d6d99a5e1a0cbe05a52f9963d9
Author: Thomas Segismont <tsegismo(a)redhat.com>
Date: Tue Jul 2 17:47:38 2013 +0200
Bug 966777 - EAP 6 plug-in is using a hard-coded operation timeout for start and stop instead of using the operation timeout or agent's default operation timeout of 10 minutes
Fix ResourceContainerTest
diff --git a/modules/core/plugin-container/src/test/java/org/rhq/core/pc/inventory/ResourceContainerTest.java b/modules/core/plugin-container/src/test/java/org/rhq/core/pc/inventory/ResourceContainerTest.java
index 75e2d51..d02447e 100644
--- a/modules/core/plugin-container/src/test/java/org/rhq/core/pc/inventory/ResourceContainerTest.java
+++ b/modules/core/plugin-container/src/test/java/org/rhq/core/pc/inventory/ResourceContainerTest.java
@@ -38,9 +38,9 @@ import org.rhq.core.domain.measurement.AvailabilityType;
import org.rhq.core.domain.resource.Resource;
import org.rhq.core.pc.PluginContainer;
import org.rhq.core.pc.PluginContainerConfiguration;
+import org.rhq.core.pc.component.ComponentInvocationContextImpl;
import org.rhq.core.pc.util.FacetLockType;
import org.rhq.core.pluginapi.availability.AvailabilityFacet;
-import org.rhq.core.pluginapi.component.ComponentInvocationContext;
import org.rhq.core.pluginapi.inventory.ResourceComponent;
import org.rhq.core.pluginapi.inventory.ResourceContext;
import org.rhq.core.pluginapi.operation.OperationFacet;
@@ -149,42 +149,49 @@ public class ResourceContainerTest {
OperationFacet proxy = resourceContainer.createResourceComponentProxy(OperationFacet.class,
FacetLockType.WRITE, 150, true, false, true);
try {
- proxy.invokeOperation("op", new Configuration());
- fail("Expected invokeOperation to throw a TimeoutException");
+ OperationResult op = proxy.invokeOperation("op", new Configuration());
+ assertTrue(op.getSimpleResult().equals(MockResourceComponent.OPERATION_RESULT));
} catch (Exception e) {
- assertFalse(e instanceof TimeoutException, "Caught unexpected instance of TimeoutException: "
- + e.getClass().getName());
+ fail("Caught unexpected Exception: " + e.getClass().getName());
assertFalse(((MockResourceComponent) resourceContainer.getResourceComponent())
.caughtInterruptedComponentInvocation());
}
}
- private ResourceContainer getResourceContainer() {
+ private ResourceContainer getResourceContainer() throws Exception {
Resource resource = new Resource("TestPlatformKey", "MyTestPlatform", PluginMetadataManager.TEST_PLATFORM_TYPE);
ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
ResourceContainer resourceContainer = new ResourceContainer(resource, contextClassLoader);
+ ResourceContext resourceContext = new ResourceContext(resource, null, null, null, null, null, null, null, null,
+ null, null, null, null, null, new ComponentInvocationContextImpl());
+ resourceContainer.setResourceContext(resourceContext);
ResourceComponent resourceComponent = new MockResourceComponent(false);
resourceContainer.setResourceComponent(resourceComponent);
+ resourceComponent.start(resourceContext);
return resourceContainer;
}
- class MockResourceComponent implements ResourceComponent, OperationFacet {
- private boolean naughty;
- private boolean caughtInterruptedComponentInvocation;
- private ResourceContext resourceContext;
+ private class MockResourceComponent implements ResourceComponent, OperationFacet {
+ static final String OPERATION_RESULT = "uninterrupted";
+ boolean naughty;
+ boolean caughtInterruptedComponentInvocation;
+ ResourceContext resourceContext;
MockResourceComponent(boolean naughty) {
this.naughty = naughty;
}
+ @Override
public void start(ResourceContext resourceContext) throws Exception {
this.resourceContext = resourceContext;
}
+ @Override
public void stop() {
this.resourceContext = null;
}
+ @Override
public AvailabilityType getAvailability() {
if (this.naughty) {
throw new MockRuntimeException();
@@ -212,16 +219,16 @@ public class ResourceContainerTest {
long start = System.nanoTime();
while (!resourceContext.getComponentInvocationContext().isInterrupted()) {
if ((System.nanoTime() - start) > MILLISECONDS.toNanos(100)) {
- // Make the operation fail after 100ms
+ // Return after 100ms
caughtInterruptedComponentInvocation = false;
- throw new Exception();
+ return new OperationResult(OPERATION_RESULT);
}
}
caughtInterruptedComponentInvocation = true;
throw new InterruptedException();
}
- public boolean caughtInterruptedComponentInvocation() {
+ boolean caughtInterruptedComponentInvocation() {
return caughtInterruptedComponentInvocation;
}
}
10 years, 10 months