[rhq] Branch 'bug/801926' - modules/common
by lkrejci
modules/common/ant-bundle/src/test/java/org/rhq/bundle/ant/AntLauncherTest.java | 2
modules/common/ant-bundle/src/test/resources/test-bundle-v2-commonSubdirectories.xml | 65 ----------
modules/common/ant-bundle/src/test/resources/test-bundle-v2-filesAndDirectories.xml | 65 ++++++++++
3 files changed, 66 insertions(+), 66 deletions(-)
New commits:
commit 3440e0033ca3e3b34e9a40e9ee85d33472575f9b
Author: Lukas Krejci <lkrejci(a)redhat.com>
Date: Fri Aug 2 10:24:02 2013 +0200
Updating the test recipes for the new names of compliance.
diff --git a/modules/common/ant-bundle/src/test/java/org/rhq/bundle/ant/AntLauncherTest.java b/modules/common/ant-bundle/src/test/java/org/rhq/bundle/ant/AntLauncherTest.java
index 22aa418..6073be9 100644
--- a/modules/common/ant-bundle/src/test/java/org/rhq/bundle/ant/AntLauncherTest.java
+++ b/modules/common/ant-bundle/src/test/java/org/rhq/bundle/ant/AntLauncherTest.java
@@ -303,7 +303,7 @@ public class AntLauncherTest {
}
public void testUpgradeNoManageRootDir() throws Exception {
- testUpgradeNoManageRootDir(true, "test-bundle-v2-commonSubdirectories.xml");
+ testUpgradeNoManageRootDir(true, "test-bundle-v2-filesAndDirectories.xml");
}
private void testUpgradeNoManageRootDir(boolean validate, String recipeFile) throws Exception {
diff --git a/modules/common/ant-bundle/src/test/resources/test-bundle-v2-commonSubdirectories.xml b/modules/common/ant-bundle/src/test/resources/test-bundle-v2-commonSubdirectories.xml
deleted file mode 100644
index 3a82a3d..0000000
--- a/modules/common/ant-bundle/src/test/resources/test-bundle-v2-commonSubdirectories.xml
+++ /dev/null
@@ -1,65 +0,0 @@
-<?xml version="1.0"?>
-
-<!--
- ~ RHQ Management Platform
- ~ Copyright (C) 2013 Red Hat, Inc.
- ~ All rights reserved.
- ~
- ~ This program is free software; you can redistribute it and/or modify
- ~ it under the terms of the GNU General Public License as published by
- ~ the Free Software Foundation version 2 of the License.
- ~
- ~ This program is distributed in the hope that it will be useful,
- ~ but WITHOUT ANY WARRANTY; without even the implied warranty of
- ~ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- ~ GNU General Public License for more details.
- ~
- ~ You should have received a copy of the GNU General Public License
- ~ along with this program; if not, write to the Free Software
- ~ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- -->
-
-<project name="test-bundle" default="main" xmlns:rhq="antlib:org.rhq.bundle">
-
- <rhq:bundle name="example.com (JBoss EAP 4.3)" version="2.5"
- description="updated bundle">
-
- <rhq:input-property
- name="listener.port"
- description="This is where the product will listen for incoming messages"
- required="true"
- defaultValue="9090"
- type="integer"/>
-
- <rhq:deployment-unit name="appserver" preinstallTarget="preinstall" postinstallTarget="postinstall"
- compliance="commonDirectories"> <!-- this is the only difference with test-bundle-v2.xml -->
- <rhq:system-service name="foo" scriptFile="foo-script"
- configFile="foo-config" overwriteScript="true"
- startLevels="3,4,5" startPriority="80" stopPriority="20" root="root"/>
- <rhq:file name="test-v2.properties" destinationFile="subdir/test.properties" replace="true"/>
- <rhq:archive name="file.zip">
- <rhq:replace>
- <rhq:fileset includes="**/*.properties"/>
- </rhq:replace>
- </rhq:archive>
- <!-- the files that should be ignored during upgrades -->
- <rhq:ignore>
- <rhq:fileset includes="*.log"/>
- </rhq:ignore>
- </rhq:deployment-unit>
-
- </rhq:bundle>
-
- <target name="main"/>
-
- <target name="preinstall">
- <echo>Deploying Test Bundle v2.5 to ${rhq.deploy.dir}...</echo>
- <property name="preinstallTargetExecuted" value="2a"/>
- </target>
-
- <target name="postinstall">
- <echo>Done deploying Test Bundle v2.5 to ${rhq.deploy.dir}.</echo>
- <property name="postinstallTargetExecuted" value="2b"/>
- </target>
-
-</project>
diff --git a/modules/common/ant-bundle/src/test/resources/test-bundle-v2-filesAndDirectories.xml b/modules/common/ant-bundle/src/test/resources/test-bundle-v2-filesAndDirectories.xml
new file mode 100644
index 0000000..b82da55
--- /dev/null
+++ b/modules/common/ant-bundle/src/test/resources/test-bundle-v2-filesAndDirectories.xml
@@ -0,0 +1,65 @@
+<?xml version="1.0"?>
+
+<!--
+ ~ RHQ Management Platform
+ ~ Copyright (C) 2013 Red Hat, Inc.
+ ~ All rights reserved.
+ ~
+ ~ This program is free software; you can redistribute it and/or modify
+ ~ it under the terms of the GNU General Public License as published by
+ ~ the Free Software Foundation version 2 of the License.
+ ~
+ ~ This program is distributed in the hope that it will be useful,
+ ~ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ ~ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ ~ GNU General Public License for more details.
+ ~
+ ~ You should have received a copy of the GNU General Public License
+ ~ along with this program; if not, write to the Free Software
+ ~ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ -->
+
+<project name="test-bundle" default="main" xmlns:rhq="antlib:org.rhq.bundle">
+
+ <rhq:bundle name="example.com (JBoss EAP 4.3)" version="2.5"
+ description="updated bundle">
+
+ <rhq:input-property
+ name="listener.port"
+ description="This is where the product will listen for incoming messages"
+ required="true"
+ defaultValue="9090"
+ type="integer"/>
+
+ <rhq:deployment-unit name="appserver" preinstallTarget="preinstall" postinstallTarget="postinstall"
+ compliance="filesAndDirectories"> <!-- this is the only difference with test-bundle-v2.xml -->
+ <rhq:system-service name="foo" scriptFile="foo-script"
+ configFile="foo-config" overwriteScript="true"
+ startLevels="3,4,5" startPriority="80" stopPriority="20" root="root"/>
+ <rhq:file name="test-v2.properties" destinationFile="subdir/test.properties" replace="true"/>
+ <rhq:archive name="file.zip">
+ <rhq:replace>
+ <rhq:fileset includes="**/*.properties"/>
+ </rhq:replace>
+ </rhq:archive>
+ <!-- the files that should be ignored during upgrades -->
+ <rhq:ignore>
+ <rhq:fileset includes="*.log"/>
+ </rhq:ignore>
+ </rhq:deployment-unit>
+
+ </rhq:bundle>
+
+ <target name="main"/>
+
+ <target name="preinstall">
+ <echo>Deploying Test Bundle v2.5 to ${rhq.deploy.dir}...</echo>
+ <property name="preinstallTargetExecuted" value="2a"/>
+ </target>
+
+ <target name="postinstall">
+ <echo>Done deploying Test Bundle v2.5 to ${rhq.deploy.dir}.</echo>
+ <property name="postinstallTargetExecuted" value="2b"/>
+ </target>
+
+</project>
10 years, 9 months
[rhq] modules/enterprise
by mike thompson
modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/AbstractD3GraphListView.java | 6
modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilitySummaryPieGraphType.java | 4
modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/D3GroupGraphListView.java | 11
modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/GroupMetricsTableDataSource.java | 5
modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/MetricsTableDataSource.java | 333 +++++++++
modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/D3GraphListView.java | 14
modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableDataSource.java | 335 ----------
7 files changed, 349 insertions(+), 359 deletions(-)
New commits:
commit 6a6485acff97d1073015d6aab0d52c5fb4022895
Author: Mike Thompson <mithomps(a)redhat.com>
Date: Thu Aug 1 14:09:03 2013 -0700
Small Graph subsystem refactoring.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/AbstractD3GraphListView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/AbstractD3GraphListView.java
index d9f27b1..2ce63aa 100644
--- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/AbstractD3GraphListView.java
+++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/AbstractD3GraphListView.java
@@ -45,13 +45,13 @@ import org.rhq.enterprise.gui.coregui.client.util.preferences.MeasurementUserPre
public abstract class AbstractD3GraphListView extends EnhancedVLayout implements AutoRefresh,RedrawGraphs {
protected final static int SINGLE_CHART_HEIGHT = 225;
protected final static int MULTI_CHART_HEIGHT = 210;
- protected static Label loadingLabel = new Label(MSG.common_msg_loading());
+ protected static final Label loadingLabel = new Label(MSG.common_msg_loading());
protected List<Availability> availabilityList;
protected List<ResourceGroupAvailability> groupAvailabilityList;
protected AvailabilityD3GraphView availabilityGraph;
- protected MeasurementUserPreferences measurementUserPrefs;
+ protected final MeasurementUserPreferences measurementUserPrefs;
protected boolean showAvailabilityGraph = false;
- protected ButtonBarDateTimeRangeEditor buttonBarDateTimeRangeEditor;
+ protected final ButtonBarDateTimeRangeEditor buttonBarDateTimeRangeEditor;
protected Timer refreshTimer;
public AbstractD3GraphListView() {
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilitySummaryPieGraphType.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilitySummaryPieGraphType.java
index fb8e096..678724a 100644
--- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilitySummaryPieGraphType.java
+++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/AvailabilitySummaryPieGraphType.java
@@ -92,8 +92,8 @@ public class AvailabilitySummaryPieGraphType {
console.log("Draw Availability Summary Pie Chart");
var global = this,
- w = 100,
- h = 100,
+ w = @org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilitySummaryPieGraphType::WIDTH,
+ h = @org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilitySummaryPieGraphType::HEIGHT,
r = h / 2,
color = $wnd.d3.scale.category10(),
data = global.@org.rhq.enterprise.gui.coregui.client.inventory.common.graph.graphtype.AvailabilitySummaryPieGraphType::getAvailabilitySummaryJson()(),
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/D3GroupGraphListView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/D3GroupGraphListView.java
index a9789c6..16670aa 100644
--- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/D3GroupGraphListView.java
+++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/D3GroupGraphListView.java
@@ -54,7 +54,7 @@ import org.rhq.enterprise.gui.coregui.client.util.async.CountDownLatch;
*/
public final class D3GroupGraphListView extends AbstractD3GraphListView implements AutoRefresh {
- private ResourceGroup resourceGroup;
+ private final ResourceGroup resourceGroup;
private VLayout graphsVLayout;
public D3GroupGraphListView(ResourceGroup resourceGroup, boolean monitorDetailView) {
@@ -67,7 +67,6 @@ public final class D3GroupGraphListView extends AbstractD3GraphListView implemen
@Override
protected void onDraw() {
super.onDraw();
-
destroyMembers();
addMember(buttonBarDateTimeRangeEditor);
@@ -81,9 +80,7 @@ public final class D3GroupGraphListView extends AbstractD3GraphListView implemen
graphsVLayout.setWidth100();
graphsVLayout.setHeight100();
- if (resourceGroup != null) {
- buildGraphs();
- }
+ buildGraphs();
addMember(graphsVLayout);
}
@@ -196,9 +193,7 @@ public final class D3GroupGraphListView extends AbstractD3GraphListView implemen
graphView.setWidth("95%");
graphView.setHeight(MULTI_CHART_HEIGHT);
- if (graphsVLayout != null) {
- graphsVLayout.addMember(graphView);
- }
+ graphsVLayout.addMember(graphView);
}
}
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/GroupMetricsTableDataSource.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/GroupMetricsTableDataSource.java
index 0b7b0bf..92221c5 100644
--- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/GroupMetricsTableDataSource.java
+++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/GroupMetricsTableDataSource.java
@@ -22,7 +22,6 @@ import org.rhq.core.domain.resource.group.composite.ResourceGroupComposite;
import org.rhq.enterprise.gui.coregui.client.CoreGUI;
import org.rhq.enterprise.gui.coregui.client.UserSessionManager;
import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup;
-import org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.table.MetricsTableDataSource;
import org.rhq.enterprise.gui.coregui.client.inventory.resource.type.ResourceTypeRepository;
import org.rhq.enterprise.gui.coregui.client.util.MeasurementUtility;
import org.rhq.enterprise.gui.coregui.client.util.preferences.MeasurementUserPreferences;
@@ -149,8 +148,8 @@ public class GroupMetricsTableDataSource extends MetricsTableDataSource {
//now retrieve metric display sumamries
GWTServiceLookup.getMeasurementChartsService().getMetricDisplaySummariesForCompatibleGroup(groupId,
- definitionArrayIds, Long.valueOf(range.get(0)).longValue(),
- Long.valueOf(range.get(1)).longValue(), false,
+ definitionArrayIds, range.get(0),
+ range.get(1), false,
new AsyncCallback<ArrayList<MetricDisplaySummary>>() {
@Override
public void onSuccess(ArrayList<MetricDisplaySummary> result) {
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/MetricsTableDataSource.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/MetricsTableDataSource.java
new file mode 100644
index 0000000..ef71ab7
--- /dev/null
+++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/groups/detail/monitoring/table/MetricsTableDataSource.java
@@ -0,0 +1,333 @@
+package org.rhq.enterprise.gui.coregui.client.inventory.groups.detail.monitoring.table;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Set;
+
+import com.google.gwt.user.client.Timer;
+import com.google.gwt.user.client.rpc.AsyncCallback;
+import com.smartgwt.client.data.DSRequest;
+import com.smartgwt.client.data.DSResponse;
+import com.smartgwt.client.data.Record;
+import com.smartgwt.client.widgets.grid.CellFormatter;
+import com.smartgwt.client.widgets.grid.ListGridField;
+import com.smartgwt.client.widgets.grid.ListGridRecord;
+
+import org.rhq.core.domain.criteria.Criteria;
+import org.rhq.core.domain.criteria.ResourceCriteria;
+import org.rhq.core.domain.measurement.DataType;
+import org.rhq.core.domain.measurement.MeasurementDefinition;
+import org.rhq.core.domain.measurement.MeasurementSchedule;
+import org.rhq.core.domain.measurement.composite.MeasurementDataNumericHighLowComposite;
+import org.rhq.core.domain.measurement.ui.MetricDisplaySummary;
+import org.rhq.core.domain.measurement.ui.MetricDisplayValue;
+import org.rhq.core.domain.resource.Resource;
+import org.rhq.core.domain.resource.ResourceType;
+import org.rhq.core.domain.resource.composite.ResourceComposite;
+import org.rhq.core.domain.util.PageList;
+import org.rhq.enterprise.gui.coregui.client.CoreGUI;
+import org.rhq.enterprise.gui.coregui.client.UserSessionManager;
+import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup;
+import org.rhq.enterprise.gui.coregui.client.inventory.resource.type.ResourceTypeRepository;
+import org.rhq.enterprise.gui.coregui.client.util.BrowserUtility;
+import org.rhq.enterprise.gui.coregui.client.util.Log;
+import org.rhq.enterprise.gui.coregui.client.util.MeasurementUtility;
+import org.rhq.enterprise.gui.coregui.client.util.RPCDataSource;
+import org.rhq.enterprise.gui.coregui.client.util.async.Command;
+import org.rhq.enterprise.gui.coregui.client.util.async.CountDownLatch;
+import org.rhq.enterprise.gui.coregui.client.util.preferences.MeasurementUserPreferences;
+
+/**
+ * A simple data source to read in metric data summaries for a resource.
+ * This doesn't support paging - everything is returned in one query. Since
+ * the number of metrics per resource is relatively small (never more than tens of them),
+ * we just load them all in at once.
+ *
+ * @author John Mazzitelli
+ * @author Mike Thompson
+ */
+public class MetricsTableDataSource extends RPCDataSource<MetricDisplaySummary, Criteria> {
+
+ public static final String FIELD_SPARKLINE = "sparkline";
+ public static final String FIELD_METRIC_LABEL = "label";
+ public static final String FIELD_ALERT_COUNT = "alertCount";
+ public static final String FIELD_MIN_VALUE = "min";
+ public static final String FIELD_MAX_VALUE = "max";
+ public static final String FIELD_AVG_VALUE = "avg";
+ public static final String FIELD_LAST_VALUE = "last";
+ public static final String FIELD_METRIC_DEF_ID = "defId";
+ public static final String FIELD_METRIC_SCHED_ID = "schedId";
+ public static final String FIELD_METRIC_UNITS = "units";
+ public static final String FIELD_METRIC_NAME = "name";
+ public static final String FIELD_RESOURCE_ID = "resourceId";
+ private int resourceId;
+ private List<MetricDisplaySummary> metricDisplaySummaries;
+ private List<List<MeasurementDataNumericHighLowComposite>> metricsDataList;
+ private MeasurementUserPreferences measurementUserPrefs;
+
+ public MetricsTableDataSource(int resourceId) {
+ this.resourceId = resourceId;
+ measurementUserPrefs = new MeasurementUserPreferences(UserSessionManager.getUserPreferences());
+ }
+
+ /**
+ * The view that contains the list grid which will display this datasource's data will call this
+ * method to get the field information which is used to control the display of the data.
+ *
+ * @return list grid fields used to display the datasource data
+ */
+ public ArrayList<ListGridField> getListGridFields() {
+ ArrayList<ListGridField> fields = new ArrayList<ListGridField>(7);
+
+ ListGridField sparklineField = new ListGridField(FIELD_SPARKLINE, "chart");
+ sparklineField.setCellFormatter(new CellFormatter() {
+ @Override
+ public String format(Object value, ListGridRecord record, int rowNum, int colNum) {
+ if (value == null) {
+ return "";
+ }
+ String contents = "<span id='sparkline_" + resourceId + "-"
+ + record.getAttributeAsInt(FIELD_METRIC_DEF_ID) + "' class='dynamicsparkline' width='70' "
+ + "values='" + record.getAttribute(FIELD_SPARKLINE) + "'></span>";
+ return contents;
+
+ }
+ });
+
+ sparklineField.setWidth(80);
+ fields.add(sparklineField);
+
+ ListGridField nameField = new ListGridField(FIELD_METRIC_LABEL, MSG.common_title_name());
+ nameField.setWidth("30%");
+ fields.add(nameField);
+
+ ListGridField alertsField = new ListGridField(FIELD_ALERT_COUNT, MSG.common_title_alerts());
+ alertsField.setWidth("10%");
+ fields.add(alertsField);
+
+ ListGridField minField = new ListGridField(FIELD_MIN_VALUE, MSG.view_resource_monitor_table_min());
+ minField.setWidth("15%");
+ fields.add(minField);
+
+ ListGridField maxField = new ListGridField(FIELD_MAX_VALUE, MSG.view_resource_monitor_table_max());
+ maxField.setWidth("15%");
+ fields.add(maxField);
+
+ ListGridField avgField = new ListGridField(FIELD_AVG_VALUE, MSG.view_resource_monitor_table_avg());
+ avgField.setWidth("15%");
+ fields.add(avgField);
+
+ ListGridField lastField = new ListGridField(FIELD_LAST_VALUE, MSG.view_resource_monitor_table_last());
+ lastField.setWidth("15%");
+ fields.add(lastField);
+
+ return fields;
+ }
+
+ @Override
+ public MetricDisplaySummary copyValues(Record from) {
+ // we should never need this method - we only go in one direction
+ // if we ever need this, just have copyValues store an "object" attribute whose value is "from"
+ // which this method then just reads out. Since we don't need this now, save memory by not
+ // keeping the MetricDisplayValue around
+ return null;
+ }
+
+ @Override
+ public ListGridRecord copyValues(MetricDisplaySummary from) {
+ MeasurementUtility.formatSimpleMetrics(from);
+
+ ListGridRecord record = new ListGridRecord();
+ record.setAttribute(FIELD_SPARKLINE, getCsvMetricsForSparkline());
+ record.setAttribute(FIELD_METRIC_LABEL, from.getLabel());
+ record.setAttribute(FIELD_ALERT_COUNT, String.valueOf(from.getAlertCount()));
+ record.setAttribute(FIELD_MIN_VALUE, getMetricStringValue(from.getMinMetric()));
+ record.setAttribute(FIELD_MAX_VALUE, getMetricStringValue(from.getMaxMetric()));
+ record.setAttribute(FIELD_AVG_VALUE, getMetricStringValue(from.getAvgMetric()));
+ record.setAttribute(FIELD_LAST_VALUE, getMetricStringValue(from.getLastMetric()));
+ record.setAttribute(FIELD_METRIC_DEF_ID, from.getDefinitionId());
+ record.setAttribute(FIELD_METRIC_SCHED_ID, from.getScheduleId());
+ record.setAttribute(FIELD_METRIC_UNITS, from.getUnits());
+ record.setAttribute(FIELD_METRIC_NAME, from.getMetricName());
+ record.setAttribute(FIELD_RESOURCE_ID, resourceId);
+ return record;
+ }
+
+ private String getCsvMetricsForSparkline() {
+ StringBuilder sb = new StringBuilder();
+ Log.debug("getCsvMetricsForSparkline.metricsDataList: " + metricsDataList.size());
+ for (List<MeasurementDataNumericHighLowComposite> measurementData : metricsDataList) {
+ for (int i = 0; i < measurementData.size(); i++) {
+ // take the last 20 values
+ if (i >= measurementData.size() - 20) {
+ if (!Double.isNaN(measurementData.get(i).getValue())) {
+ sb.append((int) measurementData.get(i).getValue());
+ sb.append(",");
+ }
+ }
+ }
+ if (sb.toString().endsWith(",")) {
+ sb.setLength(sb.length() - 1);
+ }
+ }
+ Log.debug("getCsvMetricsForSparkline: " + sb.toString());
+
+ return sb.toString();
+ }
+
+ protected String getMetricStringValue(MetricDisplayValue value) {
+ return (value != null) ? value.toString() : "";
+ }
+
+ @Override
+ protected Criteria getFetchCriteria(DSRequest request) {
+ // we don't use criterias for this datasource, just return null
+ return null;
+ }
+
+ @Override
+ protected void executeFetch(final DSRequest request, final DSResponse response, final Criteria unused) {
+
+ GWTServiceLookup.getMeasurementScheduleService().findSchedulesForResourceAndType(resourceId,
+ DataType.MEASUREMENT, null, true, new AsyncCallback<ArrayList<MeasurementSchedule>>() {
+ @Override
+ public void onSuccess(ArrayList<MeasurementSchedule> measurementSchedules) {
+ int[] scheduleIds = new int[measurementSchedules.size()];
+ int i = 0;
+ for (MeasurementSchedule measurementSchedule : measurementSchedules) {
+ scheduleIds[i++] = measurementSchedule.getId();
+ }
+
+ final CountDownLatch countDownLatch = CountDownLatch.create(2, new Command() {
+
+ @Override
+ public void execute() {
+ response.setData(buildRecords(metricDisplaySummaries));
+ processResponse(request.getRequestId(), response);
+
+ new Timer() {
+
+ @Override
+ public void run() {
+ BrowserUtility.graphSparkLines();
+ }
+ }.schedule(150);
+ Log.debug("Finished CountdownLatch for metrics loaded: " + metricsDataList.size());
+ }
+ });
+
+ retrieveResourceMetrics(resourceId, countDownLatch);
+
+ GWTServiceLookup.getMeasurementChartsService().getMetricDisplaySummariesForResource(resourceId,
+ scheduleIds, measurementUserPrefs.getMetricRangePreferences().begin,
+ measurementUserPrefs.getMetricRangePreferences().end,
+ new AsyncCallback<ArrayList<MetricDisplaySummary>>() {
+ @Override
+ public void onSuccess(ArrayList<MetricDisplaySummary> metricDisplaySummaries) {
+ setMetricDisplaySummaries(metricDisplaySummaries);
+ countDownLatch.countDown();
+ }
+
+ @Override
+ public void onFailure(Throwable caught) {
+ CoreGUI.getErrorHandler().handleError("Cannot load metrics", caught);
+ countDownLatch.countDown();
+ }
+ }
+
+ );
+ }
+
+ @Override
+ public void onFailure(Throwable caught) {
+ CoreGUI.getErrorHandler().handleError("Cannot load schedules", caught);
+ }
+ });
+ }
+
+ void setMetricDisplaySummaries(List<MetricDisplaySummary> metricDisplaySummaries) {
+ this.metricDisplaySummaries = metricDisplaySummaries;
+ }
+
+ public void retrieveResourceMetrics(final Integer resourceId, final CountDownLatch countDownLatch) {
+
+ ResourceCriteria criteria = new ResourceCriteria();
+ criteria.addFilterId(resourceId);
+
+ //locate the resource
+ GWTServiceLookup.getResourceService().findResourceCompositesByCriteria(criteria,
+ new AsyncCallback<PageList<ResourceComposite>>() {
+ @Override
+ public void onFailure(Throwable caught) {
+ Log.debug("Error retrieving resource resource composite for resource [" + resourceId + "]:"
+ + caught.getMessage());
+ }
+
+ @Override
+ public void onSuccess(PageList<ResourceComposite> resourceCompositePageList) {
+ if (!resourceCompositePageList.isEmpty()) {
+ final ResourceComposite resourceComposite = resourceCompositePageList.get(0);
+ final Resource resource = resourceComposite.getResource();
+ // Load the fully fetched ResourceType.
+ ResourceType resourceType = resource.getResourceType();
+ ResourceTypeRepository.Cache.getInstance().getResourceTypes(resourceType.getId(),
+ EnumSet.of(ResourceTypeRepository.MetadataType.measurements),
+ new ResourceTypeRepository.TypeLoadedCallback() {
+ public void onTypesLoaded(ResourceType type) {
+ resource.setResourceType(type);
+ //metric definitions
+ Set<MeasurementDefinition> definitions = type.getMetricDefinitions();
+
+ //build id mapping for measurementDefinition instances Ex. Free Memory -> MeasurementDefinition[100071]
+ final HashMap<String, MeasurementDefinition> measurementDefMap = new HashMap<String, MeasurementDefinition>();
+ for (MeasurementDefinition definition : definitions) {
+ measurementDefMap.put(definition.getDisplayName(), definition);
+ }
+ //bundle definition ids for asynch call.
+ int[] definitionArrayIds = new int[definitions.size()];
+ final String[] displayOrder = new String[definitions.size()];
+ measurementDefMap.keySet().toArray(displayOrder);
+ //sort the charting data ex. Free Memory, Free Swap Space,..System Load
+ Arrays.sort(displayOrder);
+
+ //organize definitionArrayIds for ordered request on server.
+ int index = 0;
+ for (String definitionToDisplay : displayOrder) {
+ definitionArrayIds[index++] = measurementDefMap.get(definitionToDisplay)
+ .getId();
+ }
+
+ GWTServiceLookup.getMeasurementDataService().findDataForResource(resourceId,
+ definitionArrayIds, measurementUserPrefs.getMetricRangePreferences().begin,
+ measurementUserPrefs.getMetricRangePreferences().end, 60,
+ new AsyncCallback<List<List<MeasurementDataNumericHighLowComposite>>>() {
+ @Override
+ public void onFailure(Throwable caught) {
+ Log.warn("Error retrieving recent metrics charting data for resource ["
+ + resourceId + "]:" + caught.getMessage());
+ }
+
+ @Override
+ public void onSuccess(
+ List<List<MeasurementDataNumericHighLowComposite>> measurementDataList) {
+
+ if (!measurementDataList.isEmpty()) {
+ metricsDataList = measurementDataList;
+ Log.debug("*** Setting metricsDataList.size: "
+ + metricsDataList.size());
+ countDownLatch.countDown();
+ }
+ }
+ });
+
+ }
+ });
+ }
+ }
+ });
+
+ }
+}
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/D3GraphListView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/D3GraphListView.java
index 8b9f327..5635d88 100644
--- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/D3GraphListView.java
+++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/D3GraphListView.java
@@ -60,9 +60,9 @@ import org.rhq.enterprise.gui.coregui.client.util.async.CountDownLatch;
*/
public class D3GraphListView extends AbstractD3GraphListView {
- private static int NUM_ASYNC_CALLS = 2; // wait for X async calls in Latch
+ private static final int NUM_ASYNC_CALLS = 2; // wait for X async calls in Latch
- private Resource resource;
+ private final Resource resource;
private Set<Integer> definitionIds = null;
private boolean useSummaryData = false;
private PageList<MeasurementOOBComposite> measurementOOBCompositeList;
@@ -91,7 +91,7 @@ public class D3GraphListView extends AbstractD3GraphListView {
return D3GraphListView.createSingleGraph(resource, measurementId, false);
}
- protected D3GraphListView(Resource resource, Set<Integer> definitionIds, boolean showAvailabilityGraph) {
+ private D3GraphListView(Resource resource, Set<Integer> definitionIds, boolean showAvailabilityGraph) {
super();
this.resource = resource;
commonConstructorSettings();
@@ -99,7 +99,7 @@ public class D3GraphListView extends AbstractD3GraphListView {
this.showAvailabilityGraph = showAvailabilityGraph;
}
- protected D3GraphListView(Resource resource, boolean showAvailabilityGraph) {
+ private D3GraphListView(Resource resource, boolean showAvailabilityGraph) {
super();
this.resource = resource;
this.showAvailabilityGraph = showAvailabilityGraph;
@@ -130,9 +130,7 @@ public class D3GraphListView extends AbstractD3GraphListView {
vLayout.setWidth100();
vLayout.setHeight100();
- if (resource != null) {
- queryAndBuildGraphs();
- }
+ queryAndBuildGraphs();
addMember(vLayout);
}
@@ -176,7 +174,7 @@ public class D3GraphListView extends AbstractD3GraphListView {
private void queryAndBuildGraphs() {
final long startTimer = System.currentTimeMillis();
- if (null != availabilityGraph) {
+ if (showAvailabilityGraph) {
queryAvailability(EntityContext.forResource(resource.getId()), buttonBarDateTimeRangeEditor.getStartTime(),
buttonBarDateTimeRangeEditor.getEndTime(), null);
}
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableDataSource.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableDataSource.java
deleted file mode 100644
index 57e62ab..0000000
--- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableDataSource.java
+++ /dev/null
@@ -1,335 +0,0 @@
-package org.rhq.enterprise.gui.coregui.client.inventory.resource.detail.monitoring.table;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Set;
-
-import com.google.gwt.user.client.Timer;
-import com.google.gwt.user.client.rpc.AsyncCallback;
-import com.smartgwt.client.data.DSRequest;
-import com.smartgwt.client.data.DSResponse;
-import com.smartgwt.client.data.Record;
-import com.smartgwt.client.widgets.grid.CellFormatter;
-import com.smartgwt.client.widgets.grid.ListGridField;
-import com.smartgwt.client.widgets.grid.ListGridRecord;
-
-import org.rhq.core.domain.criteria.Criteria;
-import org.rhq.core.domain.criteria.ResourceCriteria;
-import org.rhq.core.domain.measurement.DataType;
-import org.rhq.core.domain.measurement.MeasurementDefinition;
-import org.rhq.core.domain.measurement.MeasurementSchedule;
-import org.rhq.core.domain.measurement.composite.MeasurementDataNumericHighLowComposite;
-import org.rhq.core.domain.measurement.ui.MetricDisplaySummary;
-import org.rhq.core.domain.measurement.ui.MetricDisplayValue;
-import org.rhq.core.domain.resource.Resource;
-import org.rhq.core.domain.resource.ResourceType;
-import org.rhq.core.domain.resource.composite.ResourceComposite;
-import org.rhq.core.domain.util.PageList;
-import org.rhq.enterprise.gui.coregui.client.CoreGUI;
-import org.rhq.enterprise.gui.coregui.client.UserSessionManager;
-import org.rhq.enterprise.gui.coregui.client.gwt.GWTServiceLookup;
-import org.rhq.enterprise.gui.coregui.client.inventory.resource.type.ResourceTypeRepository;
-import org.rhq.enterprise.gui.coregui.client.util.BrowserUtility;
-import org.rhq.enterprise.gui.coregui.client.util.Log;
-import org.rhq.enterprise.gui.coregui.client.util.MeasurementUtility;
-import org.rhq.enterprise.gui.coregui.client.util.RPCDataSource;
-import org.rhq.enterprise.gui.coregui.client.util.async.Command;
-import org.rhq.enterprise.gui.coregui.client.util.async.CountDownLatch;
-import org.rhq.enterprise.gui.coregui.client.util.preferences.MeasurementUserPreferences;
-
-/**
- * A simple data source to read in metric data summaries for a resource.
- * This doesn't support paging - everything is returned in one query. Since
- * the number of metrics per resource is relatively small (never more than tens of them),
- * we just load them all in at once.
- *
- * @author John Mazzitelli
- * @author Mike Thompson
- * @todo: get rid of this once we have tested the new screen out
- */
-@Deprecated
-public class MetricsTableDataSource extends RPCDataSource<MetricDisplaySummary, Criteria> {
-
- public static final String FIELD_SPARKLINE = "sparkline";
- public static final String FIELD_METRIC_LABEL = "label";
- public static final String FIELD_ALERT_COUNT = "alertCount";
- public static final String FIELD_MIN_VALUE = "min";
- public static final String FIELD_MAX_VALUE = "max";
- public static final String FIELD_AVG_VALUE = "avg";
- public static final String FIELD_LAST_VALUE = "last";
- public static final String FIELD_METRIC_DEF_ID = "defId";
- public static final String FIELD_METRIC_SCHED_ID = "schedId";
- public static final String FIELD_METRIC_UNITS = "units";
- public static final String FIELD_METRIC_NAME = "name";
- public static final String FIELD_RESOURCE_ID = "resourceId";
- private int resourceId;
- private List<MetricDisplaySummary> metricDisplaySummaries;
- private List<List<MeasurementDataNumericHighLowComposite>> metricsDataList;
- private MeasurementUserPreferences measurementUserPrefs;
-
- public MetricsTableDataSource(int resourceId) {
- this.resourceId = resourceId;
- measurementUserPrefs = new MeasurementUserPreferences(UserSessionManager.getUserPreferences());
- }
-
- /**
- * The view that contains the list grid which will display this datasource's data will call this
- * method to get the field information which is used to control the display of the data.
- *
- * @return list grid fields used to display the datasource data
- */
- public ArrayList<ListGridField> getListGridFields() {
- ArrayList<ListGridField> fields = new ArrayList<ListGridField>(7);
-
- ListGridField sparklineField = new ListGridField(FIELD_SPARKLINE, "chart");
- sparklineField.setCellFormatter(new CellFormatter() {
- @Override
- public String format(Object value, ListGridRecord record, int rowNum, int colNum) {
- if (value == null) {
- return "";
- }
- String contents = "<span id='sparkline_" + resourceId + "-"
- + record.getAttributeAsInt(FIELD_METRIC_DEF_ID) + "' class='dynamicsparkline' width='70' "
- + "values='" + record.getAttribute(FIELD_SPARKLINE) + "'></span>";
- return contents;
-
- }
- });
-
- sparklineField.setWidth(80);
- fields.add(sparklineField);
-
- ListGridField nameField = new ListGridField(FIELD_METRIC_LABEL, MSG.common_title_name());
- nameField.setWidth("30%");
- fields.add(nameField);
-
- ListGridField alertsField = new ListGridField(FIELD_ALERT_COUNT, MSG.common_title_alerts());
- alertsField.setWidth("10%");
- fields.add(alertsField);
-
- ListGridField minField = new ListGridField(FIELD_MIN_VALUE, MSG.view_resource_monitor_table_min());
- minField.setWidth("15%");
- fields.add(minField);
-
- ListGridField maxField = new ListGridField(FIELD_MAX_VALUE, MSG.view_resource_monitor_table_max());
- maxField.setWidth("15%");
- fields.add(maxField);
-
- ListGridField avgField = new ListGridField(FIELD_AVG_VALUE, MSG.view_resource_monitor_table_avg());
- avgField.setWidth("15%");
- fields.add(avgField);
-
- ListGridField lastField = new ListGridField(FIELD_LAST_VALUE, MSG.view_resource_monitor_table_last());
- lastField.setWidth("15%");
- fields.add(lastField);
-
- return fields;
- }
-
- @Override
- public MetricDisplaySummary copyValues(Record from) {
- // we should never need this method - we only go in one direction
- // if we ever need this, just have copyValues store an "object" attribute whose value is "from"
- // which this method then just reads out. Since we don't need this now, save memory by not
- // keeping the MetricDisplayValue around
- return null;
- }
-
- @Override
- public ListGridRecord copyValues(MetricDisplaySummary from) {
- MeasurementUtility.formatSimpleMetrics(from);
-
- ListGridRecord record = new ListGridRecord();
- record.setAttribute(FIELD_SPARKLINE, getCsvMetricsForSparkline());
- record.setAttribute(FIELD_METRIC_LABEL, from.getLabel());
- record.setAttribute(FIELD_ALERT_COUNT, String.valueOf(from.getAlertCount()));
- record.setAttribute(FIELD_MIN_VALUE, getMetricStringValue(from.getMinMetric()));
- record.setAttribute(FIELD_MAX_VALUE, getMetricStringValue(from.getMaxMetric()));
- record.setAttribute(FIELD_AVG_VALUE, getMetricStringValue(from.getAvgMetric()));
- record.setAttribute(FIELD_LAST_VALUE, getMetricStringValue(from.getLastMetric()));
- record.setAttribute(FIELD_METRIC_DEF_ID, from.getDefinitionId());
- record.setAttribute(FIELD_METRIC_SCHED_ID, from.getScheduleId());
- record.setAttribute(FIELD_METRIC_UNITS, from.getUnits());
- record.setAttribute(FIELD_METRIC_NAME, from.getMetricName());
- record.setAttribute(FIELD_RESOURCE_ID, resourceId);
- return record;
- }
-
- private String getCsvMetricsForSparkline() {
- StringBuilder sb = new StringBuilder();
- Log.debug("getCsvMetricsForSparkline.metricsDataList: " + metricsDataList.size());
- for (List<MeasurementDataNumericHighLowComposite> measurementData : metricsDataList) {
- for (int i = 0; i < measurementData.size(); i++) {
- // take the last 20 values
- if (i >= measurementData.size() - 20) {
- if (!Double.isNaN(measurementData.get(i).getValue())) {
- sb.append((int) measurementData.get(i).getValue());
- sb.append(",");
- }
- }
- }
- if (sb.toString().endsWith(",")) {
- sb.setLength(sb.length() - 1);
- }
- }
- Log.debug("getCsvMetricsForSparkline: " + sb.toString());
-
- return sb.toString();
- }
-
- protected String getMetricStringValue(MetricDisplayValue value) {
- return (value != null) ? value.toString() : "";
- }
-
- @Override
- protected Criteria getFetchCriteria(DSRequest request) {
- // we don't use criterias for this datasource, just return null
- return null;
- }
-
- @Override
- protected void executeFetch(final DSRequest request, final DSResponse response, final Criteria unused) {
-
- GWTServiceLookup.getMeasurementScheduleService().findSchedulesForResourceAndType(resourceId,
- DataType.MEASUREMENT, null, true, new AsyncCallback<ArrayList<MeasurementSchedule>>() {
- @Override
- public void onSuccess(ArrayList<MeasurementSchedule> measurementSchedules) {
- int[] scheduleIds = new int[measurementSchedules.size()];
- int i = 0;
- for (MeasurementSchedule measurementSchedule : measurementSchedules) {
- scheduleIds[i++] = measurementSchedule.getId();
- }
-
- final CountDownLatch countDownLatch = CountDownLatch.create(2, new Command() {
-
- @Override
- public void execute() {
- response.setData(buildRecords(metricDisplaySummaries));
- processResponse(request.getRequestId(), response);
-
- new Timer() {
-
- @Override
- public void run() {
- BrowserUtility.graphSparkLines();
- }
- }.schedule(150);
- Log.debug("*** Finished CountdownLatch for metrics loaded: " + metricsDataList.size());
- }
- });
-
- retrieveResourceMetrics(resourceId, countDownLatch);
-
- GWTServiceLookup.getMeasurementChartsService().getMetricDisplaySummariesForResource(resourceId,
- scheduleIds, measurementUserPrefs.getMetricRangePreferences().begin,
- measurementUserPrefs.getMetricRangePreferences().end,
- new AsyncCallback<ArrayList<MetricDisplaySummary>>() {
- @Override
- public void onSuccess(ArrayList<MetricDisplaySummary> metricDisplaySummaries) {
- setMetricDisplaySummaries(metricDisplaySummaries);
- countDownLatch.countDown();
- }
-
- @Override
- public void onFailure(Throwable caught) {
- CoreGUI.getErrorHandler().handleError("Cannot load metrics", caught);
- countDownLatch.countDown();
- }
- }
-
- );
- }
-
- @Override
- public void onFailure(Throwable caught) {
- CoreGUI.getErrorHandler().handleError("Cannot load schedules", caught);
- }
- });
- }
-
- void setMetricDisplaySummaries(List<MetricDisplaySummary> metricDisplaySummaries) {
- this.metricDisplaySummaries = metricDisplaySummaries;
- }
-
- public void retrieveResourceMetrics(final Integer resourceId, final CountDownLatch countDownLatch) {
-
- ResourceCriteria criteria = new ResourceCriteria();
- criteria.addFilterId(resourceId);
-
- //locate the resource
- GWTServiceLookup.getResourceService().findResourceCompositesByCriteria(criteria,
- new AsyncCallback<PageList<ResourceComposite>>() {
- @Override
- public void onFailure(Throwable caught) {
- Log.debug("Error retrieving resource resource composite for resource [" + resourceId + "]:"
- + caught.getMessage());
- }
-
- @Override
- public void onSuccess(PageList<ResourceComposite> resourceCompositePageList) {
- if (!resourceCompositePageList.isEmpty()) {
- final ResourceComposite resourceComposite = resourceCompositePageList.get(0);
- final Resource resource = resourceComposite.getResource();
- // Load the fully fetched ResourceType.
- ResourceType resourceType = resource.getResourceType();
- ResourceTypeRepository.Cache.getInstance().getResourceTypes(resourceType.getId(),
- EnumSet.of(ResourceTypeRepository.MetadataType.measurements),
- new ResourceTypeRepository.TypeLoadedCallback() {
- public void onTypesLoaded(ResourceType type) {
- resource.setResourceType(type);
- //metric definitions
- Set<MeasurementDefinition> definitions = type.getMetricDefinitions();
-
- //build id mapping for measurementDefinition instances Ex. Free Memory -> MeasurementDefinition[100071]
- final HashMap<String, MeasurementDefinition> measurementDefMap = new HashMap<String, MeasurementDefinition>();
- for (MeasurementDefinition definition : definitions) {
- measurementDefMap.put(definition.getDisplayName(), definition);
- }
- //bundle definition ids for asynch call.
- int[] definitionArrayIds = new int[definitions.size()];
- final String[] displayOrder = new String[definitions.size()];
- measurementDefMap.keySet().toArray(displayOrder);
- //sort the charting data ex. Free Memory, Free Swap Space,..System Load
- Arrays.sort(displayOrder);
-
- //organize definitionArrayIds for ordered request on server.
- int index = 0;
- for (String definitionToDisplay : displayOrder) {
- definitionArrayIds[index++] = measurementDefMap.get(definitionToDisplay)
- .getId();
- }
-
- GWTServiceLookup.getMeasurementDataService().findDataForResource(resourceId,
- definitionArrayIds, measurementUserPrefs.getMetricRangePreferences().begin,
- measurementUserPrefs.getMetricRangePreferences().end, 60,
- new AsyncCallback<List<List<MeasurementDataNumericHighLowComposite>>>() {
- @Override
- public void onFailure(Throwable caught) {
- Log.warn("Error retrieving recent metrics charting data for resource ["
- + resourceId + "]:" + caught.getMessage());
- }
-
- @Override
- public void onSuccess(
- List<List<MeasurementDataNumericHighLowComposite>> measurementDataList) {
-
- if (!measurementDataList.isEmpty()) {
- metricsDataList = measurementDataList;
- Log.debug("*** Setting metricsDataList.size: "
- + metricsDataList.size());
- countDownLatch.countDown();
- }
- }
- });
-
- }
- });
- }
- }
- });
-
- }
-}
10 years, 9 months
[rhq] Changes to 'bug/801926'
by lkrejci
New branch 'bug/801926' available with the following commits:
commit d31cc6edd91c99aafd6ff7c7a8fe3d3155c0a348
Author: Lukas Krejci <lkrejci(a)redhat.com>
Date: Fri Aug 2 00:54:49 2013 +0200
[BZ 801926] - manageRootDir deprecated, superseded by "compliance".
The compliance now has 2 possible values:
* full (corresponds to manageRootDir=true, i.e. the default),
* filesAndDirectories (corresponds to manageRootDir=false)
The name "full" should convey the fact that the deployment directory is in
full compliance with the contents of the bundle.
The name "filesAndDirectories" should convey the behavior of
manageRootDir=false - i.e. the files and directories in the root dir that
are not present in the bundle are left intact. When there is a directory or
file in the root directory that is both in the deployment directory and
the bundle, the file or directory is made compliant to the contents in the
bundle.
The other two proposed deployment behaviors are "rootDirectoryAndFiles"
and "files", but those are commented out for the moment, because we don't
plan to add support for them in RHQ 4.9.
10 years, 9 months
[rhq] .classpath modules/common modules/enterprise modules/plugins
by snegrea
.classpath | 2
modules/common/cassandra-ccm/cassandra-ccm-arquillian/src/main/java/org/rhq/cassandra/ccm/arquillian/CCMSuiteDeploymentExtension.java | 2
modules/common/cassandra-ccm/cassandra-ccm-maven-plugin/src/main/java/org/rhq/cassandra/ccm/maven/DeployMojo.java | 2
modules/common/cassandra-ccm/cassandra-ccm-testng/src/main/java/org/rhq/cassandra/CCMTestNGListener.java | 2
modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/AbstractManager.java | 397 +++++-----
modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/SchemaManager.java | 52 +
modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/TopologyManager.java | 109 +-
modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/UpdateFile.java | 235 +++++
modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/UpdateFolder.java | 141 +++
modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/VersionManager.java | 209 +++--
modules/common/cassandra-schema/src/main/resources/management/0001.xml | 25
modules/common/cassandra-schema/src/main/resources/schema/create/0001.xml | 17
modules/common/cassandra-schema/src/main/resources/schema/create/0002.xml | 9
modules/common/cassandra-schema/src/main/resources/schema/drop/0001.xml | 5
modules/common/cassandra-schema/src/main/resources/schema/drop/0002.xml | 5
modules/common/cassandra-schema/src/main/resources/schema/drop/0003.xml | 5
modules/common/cassandra-schema/src/main/resources/topology/0001.xml | 8
modules/common/cassandra-schema/src/main/resources/topology/0002.xml | 25
modules/common/cassandra-schema/src/main/resources/topology/create/0001.xml | 5
modules/common/cassandra-schema/src/main/resources/topology/create/0002.xml | 26
modules/common/cassandra-schema/src/main/resources/topology/update/0001.xml | 9
modules/common/cassandra-schema/src/main/resources/topology/update/0002.xml | 26
modules/common/cassandra-schema/src/test/java/org/rhq/cassandra/schema/UpdateFileTest.java | 170 ++++
modules/common/cassandra-schema/src/test/resources/bad_file_1.xml | 9
modules/common/cassandra-schema/src/test/resources/bad_file_2.xml | 9
modules/common/cassandra-schema/src/test/resources/bad_file_3.xml | 9
modules/common/cassandra-schema/src/test/resources/no_binding.xml | 9
modules/common/cassandra-schema/src/test/resources/no_binding_extra_tags.xml | 12
modules/common/cassandra-schema/src/test/resources/no_binding_named_steps.xml | 9
modules/common/cassandra-schema/src/test/resources/required_binding.xml | 9
modules/common/cassandra-schema/src/test/resources/required_binding_named_steps.xml | 9
modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/InstallerServiceImpl.java | 2
modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java | 6
modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/scheduler/jobs/StorageNodeMaintenanceJob.java | 6
modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java | 2
35 files changed, 1170 insertions(+), 407 deletions(-)
New commits:
commit 92cb23791fc56b03ada9d1dcfb1d48aa469e6677
Author: Stefan Negrea <snegrea(a)redhat.com>
Date: Thu Aug 1 13:07:20 2013 -0500
Extensive Cassandra schema manager updates:
1) The entire code was simplified and all queries moved away from code.
2) It is now possible to bind variables to queries (eg. user and password).
3) Simplified the external interface.
4) Only the main interface is now exposed outside of the package.
5) Added unit tests for the file loading and binding code
6) Cleaned the queries and the execution flow
7) The drop restores Cassandra to the original state, pre install
diff --git a/.classpath b/.classpath
index 386316a..cad0bdb 100644
--- a/.classpath
+++ b/.classpath
@@ -216,6 +216,8 @@
<classpathentry kind="src" path="modules/enterprise/server/data-migration/src/main/java"/>
<classpathentry kind="src" path="modules/enterprise/server/data-migration/src/test/java"/>
<classpathentry kind="src" path="modules/helpers/ldap-tool/src/main/java"/>
+ <classpathentry kind="src" path="modules/common/cassandra-schema/src/test/java"/>
+ <classpathentry kind="src" path="modules/plugins/rhq-storage/src/test/java"/>
<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.7"/>
<classpathentry exported="true" kind="var" path="M2_REPO/org/apache/httpcomponents/httpclient/4.2.3/httpclient-4.2.3.jar" sourcepath="M2_REPO/org/apache/httpcomponents/httpclient/4.2.3/httpclient-4.2.3-sources.jar"/>
<classpathentry exported="true" kind="var" path="M2_REPO/commons-io/commons-io/2.1/commons-io-2.1.jar" sourcepath="M2_REPO/commons-io/commons-io/2.1/commons-io-2.1-sources.jar"/>
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-arquillian/src/main/java/org/rhq/cassandra/ccm/arquillian/CCMSuiteDeploymentExtension.java b/modules/common/cassandra-ccm/cassandra-ccm-arquillian/src/main/java/org/rhq/cassandra/ccm/arquillian/CCMSuiteDeploymentExtension.java
index a86c49e..d307f0b 100644
--- a/modules/common/cassandra-ccm/cassandra-ccm-arquillian/src/main/java/org/rhq/cassandra/ccm/arquillian/CCMSuiteDeploymentExtension.java
+++ b/modules/common/cassandra-ccm/cassandra-ccm-arquillian/src/main/java/org/rhq/cassandra/ccm/arquillian/CCMSuiteDeploymentExtension.java
@@ -159,7 +159,7 @@ public class CCMSuiteDeploymentExtension implements LoadableExtension {
try {
schemaManager.install();
clusterInitService.waitForSchemaAgreement(nodes);
- schemaManager.updateTopology(true);
+ schemaManager.updateTopology();
} catch (Exception e) {
if (null != ccm) {
ccm.shutdownCluster();
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-maven-plugin/src/main/java/org/rhq/cassandra/ccm/maven/DeployMojo.java b/modules/common/cassandra-ccm/cassandra-ccm-maven-plugin/src/main/java/org/rhq/cassandra/ccm/maven/DeployMojo.java
index b84018f..f50535c 100644
--- a/modules/common/cassandra-ccm/cassandra-ccm-maven-plugin/src/main/java/org/rhq/cassandra/ccm/maven/DeployMojo.java
+++ b/modules/common/cassandra-ccm/cassandra-ccm-maven-plugin/src/main/java/org/rhq/cassandra/ccm/maven/DeployMojo.java
@@ -74,7 +74,7 @@ public class DeployMojo extends AbstractMojo {
try {
schemaManager.install();
- schemaManager.updateTopology(true);
+ schemaManager.updateTopology();
} catch (Exception e) {
throw new MojoExecutionException("Schema installation failed.", e);
}
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-testng/src/main/java/org/rhq/cassandra/CCMTestNGListener.java b/modules/common/cassandra-ccm/cassandra-ccm-testng/src/main/java/org/rhq/cassandra/CCMTestNGListener.java
index 38d5337..a9292f7 100644
--- a/modules/common/cassandra-ccm/cassandra-ccm-testng/src/main/java/org/rhq/cassandra/CCMTestNGListener.java
+++ b/modules/common/cassandra-ccm/cassandra-ccm-testng/src/main/java/org/rhq/cassandra/CCMTestNGListener.java
@@ -129,7 +129,7 @@ public class CCMTestNGListener implements IInvokedMethodListener {
if (annotation.waitForSchemaAgreement()) {
clusterInitService.waitForSchemaAgreement(nodes);
}
- schemaManager.updateTopology(true);
+ schemaManager.updateTopology();
}
private void shutdownCluster() throws Exception {
diff --git a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/AbstractManager.java b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/AbstractManager.java
index bcf9831..baf7c23 100644
--- a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/AbstractManager.java
+++ b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/AbstractManager.java
@@ -25,21 +25,9 @@
package org.rhq.cassandra.schema;
-import java.io.BufferedReader;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.net.JarURLConnection;
-import java.net.URL;
import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.Enumeration;
import java.util.List;
-import java.util.jar.JarEntry;
-import java.util.jar.JarFile;
-
-import javax.xml.parsers.DocumentBuilder;
-import javax.xml.parsers.DocumentBuilderFactory;
+import java.util.Properties;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.ProtocolOptions.Compression;
@@ -50,9 +38,6 @@ import com.datastax.driver.core.exceptions.NoHostAvailableException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import org.w3c.dom.Document;
-import org.w3c.dom.Node;
-import org.w3c.dom.NodeList;
import org.rhq.cassandra.util.ClusterBuilder;
import org.rhq.core.domain.cloud.StorageNode;
@@ -61,26 +46,35 @@ import org.rhq.core.util.StringUtil;
/**
* @author Stefan Negrea
*/
-public class AbstractManager {
-
- private static final String UPDATE_PLAN_ELEMENT = "updatePlan";
- private static final String STEP_ELEMENT = "step";
-
- private static final String SCHEMA_EXISTS_QUERY = "SELECT * FROM system.schema_keyspaces WHERE keyspace_name = 'rhq';";
- private static final String VERSION_COLUMNFAMILY_EXISTS_QUERY = "SELECT * from system.schema_columnfamilies WHERE keyspace_name='rhq' AND columnfamily_name='schema_version';";
- private static final String VERSION_QUERY = "SELECT version FROM rhq.schema_version";
- private static final String REPLICATION_FACTOR_QUERY = "SELECT strategy_options FROM system.schema_keyspaces where keyspace_name='rhq';";
-
+abstract class AbstractManager {
+ private static final String MANAGEMENT_BASE_FOLDER = "management";
+ protected static final String DEFAULT_CASSANDRA_USER = "cassandra";
+ protected static final String DEFAULT_CASSANDRA_PASSWORD = "cassandra";
private final Log log = LogFactory.getLog(AbstractManager.class);
- protected Session session;
- protected final String username;
- protected final String password;
- protected List<StorageNode> nodes = new ArrayList<StorageNode>();
+ enum Query {
+ USER_EXISTS,
+ SCHEMA_EXISTS,
+ VERSION_COLUMNFAMILY_EXISTS,
+ VERSION,
+ REPLICATION_FACTOR,
+ INSERT_SCHEMA_VERSION;
+
+ @Override
+ public String toString() {
+ return this.name().toLowerCase();
+ }
+ }
+
+ private Session session;
+ private final String username;
+ private final String password;
+ private List<StorageNode> nodes = new ArrayList<StorageNode>();
+ private final UpdateFile managementTasks;
- public AbstractManager(String username, String password, List<StorageNode> nodes) {
+ protected AbstractManager(String username, String password, List<StorageNode> nodes) {
try {
this.username = username;
this.password = password;
@@ -88,13 +82,106 @@ public class AbstractManager {
} catch (NoHostAvailableException e) {
throw new RuntimeException("Unable create storage node session.", e);
}
+
+ try {
+ UpdateFolder managementFolder = new UpdateFolder(MANAGEMENT_BASE_FOLDER);
+ managementTasks = managementFolder.getUpdateFiles().get(0);
+ } catch (Exception e) {
+ throw new RuntimeException("Unable create storage node session.", e);
+ }
+ }
+
+ /**
+ * Init the Cassandra cluster session with the username and password provided
+ * at creation.
+ */
+ protected void initClusterSession() {
+ initClusterSession(username, password);
+ }
+
+ /**
+ * Init the Cassandra cluster session with provided username and password.
+ *
+ * @param username
+ * @param password
+ */
+ protected void initClusterSession(String username, String password) {
+ shutdownClusterConnection();
+
+ String[] hostNames = new String[nodes.size()];
+ for (int i = 0; i < hostNames.length; ++i) {
+ hostNames[i] = nodes.get(i).getAddress();
+ }
+
+ log.info("Initializing session to connect to " + StringUtil.arrayToString(hostNames));
+
+ Cluster cluster = new ClusterBuilder().addContactPoints(hostNames).withCredentials(username, password)
+ .withPort(nodes.get(0).getCqlPort()).withCompression(Compression.NONE).build();
+
+ log.info("Cluster connection configured.");
+
+ session = cluster.connect("system");
+ log.info("Cluster connected.");
+ }
+
+ /**
+ * Shutdown the Cassandra cluster connection.
+ */
+ protected void shutdownClusterConnection() {
+ log.info("Shutting down existing cluster connections");
+ if (session != null && session.getCluster() != null) {
+ session.getCluster().shutdown();
+ }
+ }
+
+ /**
+ * Get cluster size.
+ *
+ * @return cluster size
+ */
+ protected int getClusterSize() {
+ return nodes.size();
+ }
+
+ /**
+ * @return the username
+ */
+ protected String getUsername() {
+ return username;
+ }
+
+ /**
+ * @return the password
+ */
+ protected String getPassword() {
+ return password;
+ }
+
+ /**
+ * Runs a CQL query to check the existence of the RHQ user
+ *
+ * @return true if the RHQ user exists, false otherwise
+ */
+ protected boolean userExists() {
+ try {
+ ResultSet resultSet = executeManagementQuery(Query.USER_EXISTS, "username", username);
+ return !resultSet.all().isEmpty();
+ } catch (Exception e) {
+ log.error(e);
+ throw new RuntimeException(e);
+ }
}
+ /**
+ * Run a CQL query to check the existence of the RHQ schema
+ *
+ * @return true if the RHQ schema exists, false otherwise
+ */
protected boolean schemaExists() {
try {
- ResultSet resultSet = session.execute(SCHEMA_EXISTS_QUERY);
+ ResultSet resultSet = executeManagementQuery(Query.SCHEMA_EXISTS);
if (!resultSet.all().isEmpty()) {
- resultSet = session.execute(VERSION_COLUMNFAMILY_EXISTS_QUERY);
+ resultSet = executeManagementQuery(Query.VERSION_COLUMNFAMILY_EXISTS);
return !resultSet.all().isEmpty();
}
return false;
@@ -104,10 +191,15 @@ public class AbstractManager {
}
}
+ /**
+ * Run a CQL query to retrieve the current RHQ schema version
+ *
+ * @return current RHQ schema version
+ */
protected int getSchemaVersion() {
int maxVersion = 0;
try {
- ResultSet resultSet = session.execute(VERSION_QUERY);
+ ResultSet resultSet = executeManagementQuery(Query.VERSION);
for (Row row : resultSet.all()) {
if (maxVersion < row.getInt(0)) {
maxVersion = row.getInt(0);
@@ -121,155 +213,140 @@ public class AbstractManager {
return maxVersion;
}
- protected void removeAppliedUpdates(List<String> updateFiles, int currentSchemaVersion) {
- while (!updateFiles.isEmpty()) {
- int version = this.extractVersionFromUpdateFile(updateFiles.get(0));
- if (version <= currentSchemaVersion) {
- updateFiles.remove(0);
- } else {
- break;
- }
+ /**
+ * Calculate the replication factor based on the input cluster size.
+ *
+ * @return calculated replication factor
+ */
+ protected int calculateNewReplicationFactor() {
+ int replicationFactor;
+ if (getClusterSize() < 3) {
+ replicationFactor = getClusterSize();
+ } else if (getClusterSize() < 4) {
+ replicationFactor = 2;
+ } else {
+ replicationFactor = 3;
}
+ return replicationFactor;
}
- protected int extractVersionFromUpdateFile(String file) {
- file = file.substring(file.lastIndexOf('/') + 1);
- file = file.substring(0, file.indexOf('.'));
- return Integer.parseInt(file);
- }
-
- protected List<String> getSteps(String file) throws Exception {
- List<String> steps = new ArrayList<String>();
- InputStream stream = null;
+ /**
+ * Run a CQL query to retrieve the current replication factor for RHQ schema.
+ *
+ * @return existing replication factor
+ */
+ protected int queryReplicationFactor() {
+ int replicationFactor = 1;
try {
- stream = SchemaManager.class.getClassLoader().getResourceAsStream(file);
-
- DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
- DocumentBuilder builder = factory.newDocumentBuilder();
- Document doc = builder.parse(stream);
+ ResultSet resultSet = executeManagementQuery(Query.REPLICATION_FACTOR);
+ Row row = resultSet.one();
- Node rootDocument = doc.getElementsByTagName(UPDATE_PLAN_ELEMENT).item(0);
- NodeList updateStepElements = rootDocument.getChildNodes();
+ String replicationFactorString = "replication_factor\"";
+ String resultString = row.getString(0);
+ resultString = resultString.substring(resultString.indexOf(replicationFactorString)
+ + replicationFactorString.length());
+ resultString = resultString.substring(resultString.indexOf('"') + 1);
+ resultString = resultString.substring(0, resultString.indexOf('"'));
- for (int index = 0; index < updateStepElements.getLength(); index++) {
- Node updateStepElement = updateStepElements.item(index);
- if (STEP_ELEMENT.equals(updateStepElement.getNodeName()) && updateStepElement.getTextContent() != null) {
- steps.add(updateStepElement.getTextContent());
- }
- }
+ replicationFactor = Integer.parseInt(resultString);
} catch (Exception e) {
- log.error("Error reading the list of steps from " + file + " file.", e);
- throw e;
- } finally {
- if (stream != null) {
- try {
- stream.close();
- } catch (Exception e) {
- log.error("Error closing the stream with the list of steps from " + file + " file.", e);
- throw e;
- }
- }
+ log.error(e);
}
- return steps;
+ return replicationFactor;
}
- protected List<String> getUpdateFiles(String folder) throws Exception {
- List<String> files = new ArrayList<String>();
- InputStream stream = null;
-
- try {
- URL resourceFolderURL = this.getClass().getClassLoader().getResource(folder);
+ /**
+ * Execute a named management query.
+ *
+ * @param query named management query
+ * @return result
+ */
+ protected ResultSet executeManagementQuery(Query query) {
+ return executeManagementQuery(query, null);
+ }
- if (resourceFolderURL.getProtocol().equals("file")) {
- stream = this.getClass().getClassLoader().getResourceAsStream(folder);
- BufferedReader reader = new BufferedReader(new InputStreamReader(stream));
+ /**
+ * Execute a named management query with the given property (name,value).
+ *
+ * @param query named management query
+ * @param propertyName property name
+ * @param propertyValue property value.
+ * @return result of the query
+ */
+ protected ResultSet executeManagementQuery(Query query, String propertyName, String propertyValue) {
+ Properties properties = new Properties();
+ properties.put(propertyName, propertyValue);
+ return executeManagementQuery(query, properties);
+ }
- String updateFile;
- while ((updateFile = reader.readLine()) != null) {
- files.add(folder + updateFile);
- }
- } else if (resourceFolderURL.getProtocol().equals("jar")) {
- URL jarURL = this.getClass().getClassLoader().getResources(folder).nextElement();
- JarURLConnection jarURLCon = (JarURLConnection) (jarURL.openConnection());
- JarFile jarFile = jarURLCon.getJarFile();
- Enumeration<JarEntry> entries = jarFile.entries();
- while (entries.hasMoreElements()) {
- String entry = entries.nextElement().getName();
- if (entry.startsWith(folder) && !entry.equals(folder)) {
- files.add(entry);
- }
- }
- }
+ /**
+ * Execute a named management query with the given properties.
+ *
+ * @param query named management query
+ * @param properties properties
+ * @return result of the query
+ */
+ protected ResultSet executeManagementQuery(Query query, Properties properties) {
+ String queryString = managementTasks.getNamedStep(query.toString(), properties);
+ return execute(queryString);
+ }
- Collections.sort(files, new Comparator<String>() {
- @Override
- public int compare(String o1, String o2) {
- return o1.compareTo(o2);
- }
- });
- } catch (Exception e) {
- log.error("Error reading the list of update files.", e);
- throw e;
- } finally {
- if (stream != null) {
- try{
- stream.close();
- } catch (Exception e) {
- log.error("Error closing the stream with the list of update files.", e);
- throw e;
- }
- }
- }
- return files;
+ /**
+ * Execute all the queries in an update file as returned by {@link UpdateFile#getOrderedSteps()}.
+ *
+ * @param updateFile update file
+ * @return list of result sets, one for each executed query.
+ */
+ protected List<ResultSet> execute(UpdateFile updateFile) {
+ return execute(updateFile, null);
}
- protected void initCluster() {
- initCluster(username, password);
+ /**
+ * Execute all the queries in an update file as returned by {@link UpdateFile#getOrderedSteps(Properties)} with
+ * the given property (name,value).
+ *
+ * @param updateFile update file
+ * @param propertyName property name
+ * @param propertyValue property value
+ * @return list of result sets, one for each executed query.
+ */
+ protected List<ResultSet> execute(UpdateFile updateFile, String propertyName, String propertyValue) {
+ Properties properties = new Properties();
+ properties.put(propertyName, propertyValue);
+ return execute(updateFile, properties);
}
- protected void initCluster(String username, String password) {
- String[] hostNames = new String[nodes.size()];
- for (int i = 0; i < hostNames.length; ++i) {
- hostNames[i] = nodes.get(i).getAddress();
+ /**
+ * Execute all the queries in an update file as returned by {@link UpdateFile#getOrderedSteps(Properties)} with
+ * the given properties.
+ *
+ * @param updateFile update file
+ * @param properties properties
+ * @return list of result sets, one for each executed query.
+ */
+ protected List<ResultSet> execute(UpdateFile updateFile, Properties properties) {
+ List<ResultSet> results = new ArrayList<ResultSet>();
+
+ log.info("Applying update file: " + updateFile);
+ for (String step : updateFile.getOrderedSteps(properties)) {
+ log.info("Statement: \n" + step);
+ results.add(execute(step));
}
+ log.info("Applied update file: " + updateFile);
- log.info("Initializing session to connect to " + StringUtil.arrayToString(hostNames));
-
- Cluster cluster = new ClusterBuilder().addContactPoints(hostNames).withCredentials(username, password)
- .withPort(nodes.get(0).getCqlPort()).withCompression(Compression.NONE).build();
-
- log.info("Cluster connection configured.");
-
- session = cluster.connect("system");
- log.info("Cluster connected.");
+ return results;
}
- protected void shutdown() {
- log.info("Shutting down connections");
- session.getCluster().shutdown();
+ /**
+ * Execute a CQL query.
+ *
+ * @param query query
+ * @return result for the query
+ */
+ protected ResultSet execute(String query) {
+ return session.execute(query);
}
- protected int getReplicationFactor() {
- int replicationFactor = 1;
- try {
- String replicationFactorString = "replication_factor\"";
-
- ResultSet resultSet = session.execute(REPLICATION_FACTOR_QUERY);
- Row row = resultSet.one();
-
- String resultString = row.getString(0);
- resultString = resultString.substring(resultString.indexOf(replicationFactorString)
- + replicationFactorString.length());
- resultString = resultString.substring(resultString.indexOf('"') + 1);
- resultString = resultString.substring(0, resultString.indexOf('"'));
-
- replicationFactor = Integer.parseInt(resultString);
- } catch (Exception e) {
- log.error(e);
- }
-
- return replicationFactor;
- }
}
diff --git a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/SchemaManager.java b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/SchemaManager.java
index 2836964..8d28bfa 100644
--- a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/SchemaManager.java
+++ b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/SchemaManager.java
@@ -29,8 +29,6 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.log4j.ConsoleAppender;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
@@ -43,8 +41,6 @@ import org.rhq.core.domain.cloud.StorageNode;
*/
public class SchemaManager {
- private final Log log = LogFactory.getLog(SchemaManager.class);
-
/**
* The username that RHQ will use to connect to the storage cluster.
*/
@@ -83,25 +79,53 @@ public class SchemaManager {
this.nodes.addAll(nodes);
}
+ /**
+ * Install and update the RHQ schema.
+ *
+ * @throws Exception
+ */
public void install() throws Exception {
VersionManager version = new VersionManager(username, password, nodes);
version.install();
}
+ /**
+ * Drop RHQ schema and revert the database to pre-RHQ state.
+ *
+ * @throws Exception
+ */
public void drop() throws Exception {
VersionManager version = new VersionManager(username, password, nodes);
version.drop();
}
- public boolean updateTopology(boolean isNewSchema) throws Exception {
+ /**
+ * Update cluster topology settings, such as replication factor,
+ * based on the current set of storage nodes. The underlying
+ * topology manager opens and closes its own cluster connection.
+ *
+ * @throws Exception if the topology update fails
+ */
+ public void updateTopology() throws Exception {
TopologyManager topology = new TopologyManager(username, password, nodes);
- return topology.updateTopology(isNewSchema);
+ topology.updateTopology();
}
+ /**
+ * Returns the list of storage nodes.
+ *
+ * @return list of storage nodes
+ */
public List<StorageNode> getStorageNodes() {
return nodes;
}
+ /**
+ * Parse raw string that contains the list of storage nodes.
+ *
+ * @param nodes list of storage nodes
+ * @return list of parsed storage nodes
+ */
private static List<StorageNode> parseNodeInformation(String... nodes) {
List<StorageNode> parsedNodes = new ArrayList<StorageNode>();
for (String node : nodes) {
@@ -113,6 +137,12 @@ public class SchemaManager {
return parsedNodes;
}
+ /**
+ * A main runner used for direct usage of the schema manager.
+ *
+ * @param args arguments
+ * @throws Exception
+ */
public static void main(String[] args) throws Exception {
try {
Logger root = Logger.getRootLogger();
@@ -126,29 +156,29 @@ public class SchemaManager {
System.out.println("Usage : command username password nodes...");
System.out.println("\n");
System.out.println("Commands : install | drop | topology");
- System.out.println("Node format: hostname|thriftPort|nativeTransportPort");
-
+ System.out.println("Node format: hostname|jmxPort|cqlPort");
return;
}
String command = args[0];
String username = args[1];
String password = args[2];
+ String[] hosts = Arrays.copyOfRange(args, 3, args.length);
- SchemaManager schemaManager = new SchemaManager(username, password,
- Arrays.copyOfRange(args, 3, args.length));
+ SchemaManager schemaManager = new SchemaManager(username, password, hosts);
if ("install".equalsIgnoreCase(command)) {
schemaManager.install();
} else if ("drop".equalsIgnoreCase(command)) {
schemaManager.drop();
} else if ("topology".equalsIgnoreCase(command)) {
- schemaManager.updateTopology(true);
+ schemaManager.updateTopology();
} else {
throw new IllegalArgumentException(command + " not available.");
}
} catch (Exception e) {
System.err.println(e);
+ e.printStackTrace();
} finally {
System.exit(0);
}
diff --git a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/TopologyManager.java b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/TopologyManager.java
index fd987a1..6c08faa 100644
--- a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/TopologyManager.java
+++ b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/TopologyManager.java
@@ -27,9 +27,6 @@ package org.rhq.cassandra.schema;
import java.util.List;
-import com.datastax.driver.core.BoundStatement;
-import com.datastax.driver.core.PreparedStatement;
-
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -38,12 +35,11 @@ import org.rhq.core.domain.cloud.StorageNode;
/**
* @author Stefan Negrea
*/
-public class TopologyManager extends AbstractManager {
-
- private final Log log = LogFactory.getLog(TopologyManager.class);
+class TopologyManager extends AbstractManager {
private static final String TOPOLOGY_BASE_FOLDER = "topology";
+ private final Log log = LogFactory.getLog(TopologyManager.class);
private enum Task {
UpdateReplicationFactor("0001.xml"),
@@ -55,12 +51,8 @@ public class TopologyManager extends AbstractManager {
this.file = file;
}
- protected String getFile(boolean isNewSchema) {
- if (isNewSchema) {
- return TOPOLOGY_BASE_FOLDER + "/create/" + this.file;
- }
-
- return TOPOLOGY_BASE_FOLDER + "/update/" + this.file;
+ protected String getFile() {
+ return TOPOLOGY_BASE_FOLDER + "/" + this.file;
}
}
@@ -68,78 +60,65 @@ public class TopologyManager extends AbstractManager {
super(username, password, nodes);
}
- public boolean updateTopology(boolean isNewSchema) throws Exception {
- boolean result = false;
-
- initCluster();
- if (schemaExists()) {
- log.info("Applying topology updates...");
- result = this.updateReplicationFactor(isNewSchema, nodes.size());
- this.updateGCGrace(isNewSchema, nodes.size());
- } else {
- log.info("Topology updates cannot be applied because the schema is not installed.");
+ /**
+ * Updates cluster topology settings:
+ * 1) replication factor
+ * 2) gc grace period
+ *
+ * The cluster connection is closed when the update completes.
+ */
+ public void updateTopology() {
+ try {
+ initClusterSession();
+ if (schemaExists()) {
+ log.info("Applying topology updates...");
+ updateReplicationFactor();
+ updateGCGrace();
+ } else {
+ log.info("Topology updates cannot be applied because the schema is not installed.");
+ }
+ } finally {
+ shutdownClusterConnection();
}
- shutdown();
-
- return result;
}
- private boolean updateReplicationFactor(boolean isNewSchema, int numberOfNodes) throws Exception {
+ /**
+ * Update replication factor based on the current set of storage nodes.
+ *
+ * No-op if the replication factor already matches the calculated value.
+ */
+ private void updateReplicationFactor() {
log.info("Starting to execute " + Task.UpdateReplicationFactor + " task.");
- int replicationFactor = 1;
-
- if (numberOfNodes == 2) {
- replicationFactor = 2;
- } else if (numberOfNodes == 3) {
- replicationFactor = 2;
- } else if (numberOfNodes > 3) {
- replicationFactor = 3;
- }
-
- if (getReplicationFactor() == replicationFactor) {
- return false;
- }
-
- log.info("Applying file " + Task.UpdateReplicationFactor.getFile(isNewSchema) + " for " +
- Task.UpdateReplicationFactor + " task.");
- for (String query : this.getSteps(Task.UpdateReplicationFactor.getFile(isNewSchema))) {
- executedPreparedStatement(query, replicationFactor);
+ int newReplicationFactor = calculateNewReplicationFactor();
+ int existingReplicationFactor = queryReplicationFactor();
+ if (existingReplicationFactor == newReplicationFactor) {
+ log.info("No need to update replication factor. Replication factor already " + newReplicationFactor);
+ } else {
+ execute(new UpdateFile(Task.UpdateReplicationFactor.getFile()), "replication_factor", newReplicationFactor
+ + "");
+ log.info("Updated replication factor from " + existingReplicationFactor + " to " + newReplicationFactor);
}
- log.info("File " + Task.UpdateReplicationFactor.getFile(isNewSchema) + " applied for " +
- Task.UpdateReplicationFactor + " task.");
log.info("Successfully executed " + Task.UpdateReplicationFactor + " task.");
- return true;
}
- private boolean updateGCGrace(boolean isNewSchema, int numberOfNodes) throws Exception {
+ /**
+ * Update gc grace interval based on the current set of storage nodes.
+ */
+ private void updateGCGrace() {
log.info("Starting to execute " + Task.UpdateGCGrace + " task.");
int gcGraceSeconds = 864000;
- if (numberOfNodes == 1) {
+ if (getClusterSize() == 1) {
gcGraceSeconds = 0;
} else {
gcGraceSeconds = 691200; // 8 days
}
-
- log.info("Applying file " + Task.UpdateGCGrace.getFile(isNewSchema) + " for " + Task.UpdateGCGrace + " task.");
- for (String query : this.getSteps(Task.UpdateGCGrace.getFile(isNewSchema))) {
- executedPreparedStatement(query, gcGraceSeconds);
- }
- log.info("File " + Task.UpdateGCGrace.getFile(isNewSchema) + " applied for " + Task.UpdateGCGrace + " task.");
+ execute(new UpdateFile(Task.UpdateGCGrace.getFile()), "gc_grace_seconds", gcGraceSeconds + "");
+ log.info("Updated gc_grace_seconds to " + gcGraceSeconds);
log.info("Successfully executed " + Task.UpdateGCGrace + " task.");
- return true;
}
-
- private void executedPreparedStatement(String query, Object... values) {
- String formattedQuery = String.format(query, values);
- log.info("Statement: \n" + formattedQuery);
- PreparedStatement preparedStatement = session.prepare(formattedQuery);
- BoundStatement boundStatement = preparedStatement.bind();
- session.execute(boundStatement);
- }
-
}
diff --git a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/UpdateFile.java b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/UpdateFile.java
new file mode 100644
index 0000000..a5cf33c
--- /dev/null
+++ b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/UpdateFile.java
@@ -0,0 +1,235 @@
+/*
+ *
+ * * RHQ Management Platform
+ * * Copyright (C) 2005-2012 Red Hat, Inc.
+ * * All rights reserved.
+ * *
+ * * This program is free software; you can redistribute it and/or modify
+ * * it under the terms of the GNU General Public License, version 2, as
+ * * published by the Free Software Foundation, and/or the GNU Lesser
+ * * General Public License, version 2.1, also as published by the Free
+ * * Software Foundation.
+ * *
+ * * This program is distributed in the hope that it will be useful,
+ * * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * * GNU General Public License and the GNU Lesser General Public License
+ * * for more details.
+ * *
+ * * You should have received a copy of the GNU General Public License
+ * * and the GNU Lesser General Public License along with this program;
+ * * if not, write to the Free Software Foundation, Inc.,
+ * * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ */
+
+package org.rhq.cassandra.schema;
+
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Properties;
+import java.util.Set;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.w3c.dom.Document;
+import org.w3c.dom.Node;
+import org.w3c.dom.NodeList;
+
+/**
+ * @author Stefan Negrea
+ */
+class UpdateFile implements Comparable<UpdateFile> {
+
+ private static final String UPDATE_PLAN_ELEMENT = "updatePlan";
+ private static final String STEP_ELEMENT = "step";
+
+ private final Log log = LogFactory.getLog(UpdateFile.class);
+
+ private final String file;
+
+ public UpdateFile(String file) {
+ this.file = file;
+ }
+
+ public String getFile() {
+ return this.file;
+ }
+
+ /**
+ * Retrieve a named step from the list of steps ready to be executed
+ * on Cassandra.
+ *
+ * @param name step name
+ * @return step
+ * @throws Exception
+ */
+ public String getNamedStep(String name) {
+ return getNamedStep(name, null);
+ }
+
+ /**
+ * Retrieve a named step from the list of steps ready to be executed
+ * on Cassandra. The step will go through variable binding process with the
+ * provided properties.
+ *
+ * @param name step name
+ * @param properties properties to bind
+ * @return step
+ */
+ public String getNamedStep(String name, Properties properties) {
+ List<Node> stepNodes = getStepNodes();
+ for (Node stepNode : stepNodes) {
+ Node nameNode = stepNode.getAttributes().getNamedItem("name");
+ if (nameNode != null && nameNode.getNodeValue().equals(name)) {
+ return bind(stepNode.getTextContent(), properties);
+ }
+ }
+
+ return null;
+ }
+
+ /**
+ * Retrieve all the steps in the file in declaration order. The steps are ready to
+ * be executed.
+ *
+ * @return list of steps
+ */
+ public List<String> getOrderedSteps() {
+ return getOrderedSteps(null);
+ }
+
+ /**
+ * Retrieve all the steps in the file in declaration order. The steps are ready to
+ * be executed. Each step will go through variable binding process with the
+ * provided properties.
+ *
+ * @param properties properties to bind.
+ * @return list of steps with the provided properties bound
+ */
+ public List<String> getOrderedSteps(Properties properties) {
+ List<String> boundSteps = new ArrayList<String>();
+ List<Node> stepNodes = getStepNodes();
+
+ for (Node stepNode : stepNodes) {
+ boundSteps.add(bind(stepNode.getTextContent(), properties));
+ }
+
+ return boundSteps;
+ }
+
+ /**
+ * Retrieve unbound list of steps from the file in declaration order.
+ *
+ * @return unbound list of steps.
+ */
+ private List<Node> getStepNodes() {
+ InputStream stream = null;
+ try {
+ stream = SchemaManager.class.getClassLoader().getResourceAsStream(file);
+
+ DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
+ DocumentBuilder builder = factory.newDocumentBuilder();
+ Document doc = builder.parse(stream);
+
+ NodeList updateElements = doc.getElementsByTagName(UPDATE_PLAN_ELEMENT);
+ if (updateElements == null || updateElements.getLength() != 1) {
+ throw new RuntimeException("No <updatePlan> elements found");
+ }
+
+ Node rootDocument = updateElements.item(0);
+ NodeList updateStepElements = rootDocument.getChildNodes();
+
+ List<Node> stepList = new ArrayList<Node>();
+ for (int index = 0; index < updateStepElements.getLength(); index++) {
+ Node updateStepElement = updateStepElements.item(index);
+ if (STEP_ELEMENT.equals(updateStepElement.getNodeName()) && updateStepElement.getTextContent() != null) {
+ stepList.add(updateStepElements.item(index));
+ }
+ }
+
+ return stepList;
+ } catch (Exception e) {
+ log.error("Error reading the list of steps from " + file + " file.", e);
+ throw new RuntimeException(e);
+ } finally {
+ if (stream != null) {
+ try {
+ stream.close();
+ } catch (Exception e) {
+ log.error("Error closing the stream with the list of steps from " + file + " file.", e);
+ throw new RuntimeException(e);
+ }
+ }
+ }
+ }
+
+ /**
+ * Bind the set of provided properties to the input step. The text should have
+ * all the variable to be bound in %variable_name% form.
+ *
+ * This method should be called even if no properties are provided because it will
+ * throw a runtime exception if the text contains properties that are expected to be
+ * bound but the list of variable is either empty or does not contain
+ * them.
+ *
+ * @param unboundText unbound text
+ * @param properties properties to bind
+ * @return properties bound text
+ */
+ private String bind(String unboundText, Properties properties) {
+ Set<String> foundProperties = new HashSet<String>();
+ Pattern regex = Pattern.compile("\\%([^%]*)\\%");
+ Matcher matchPattern = regex.matcher(unboundText);
+ while (matchPattern.find()) {
+ String matchedString = matchPattern.group();
+ String property = matchedString.substring(1, matchedString.length() - 1);
+ foundProperties.add(property);
+ }
+
+ String boundText = unboundText;
+
+ if( foundProperties.size() !=0 && properties == null){
+ throw new RuntimeException("No properties provided but " + foundProperties.size()
+ + " required for binding.");
+ } else if (foundProperties.size() != 0) {
+ for (String foundProperty : foundProperties) {
+ String propertyValue = properties.getProperty(foundProperty);
+ if (propertyValue == null) {
+ throw new RuntimeException("Cannot bind query. Property [" + foundProperty + "] not found.");
+ }
+
+ boundText = boundText.replaceAll("\\%" + foundProperty + "\\%", propertyValue);
+ }
+ }
+
+ return boundText;
+ }
+
+ /**
+ * Extract the version from the file name.
+ *
+ * @return version
+ */
+ public int extractVersion() {
+ String filename = this.getFile();
+ filename = filename.substring(filename.lastIndexOf('/') + 1);
+ filename = filename.substring(0, filename.indexOf('.'));
+ return Integer.parseInt(filename);
+ }
+
+ /* (non-Javadoc)
+ * @see java.lang.Comparable#compareTo(java.lang.Object)
+ */
+ @Override
+ public int compareTo(UpdateFile o) {
+ return this.getFile().compareTo(o.getFile());
+ }
+}
diff --git a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/UpdateFolder.java b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/UpdateFolder.java
new file mode 100644
index 0000000..152a757
--- /dev/null
+++ b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/UpdateFolder.java
@@ -0,0 +1,141 @@
+/*
+ *
+ * * RHQ Management Platform
+ * * Copyright (C) 2005-2012 Red Hat, Inc.
+ * * All rights reserved.
+ * *
+ * * This program is free software; you can redistribute it and/or modify
+ * * it under the terms of the GNU General Public License, version 2, as
+ * * published by the Free Software Foundation, and/or the GNU Lesser
+ * * General Public License, version 2.1, also as published by the Free
+ * * Software Foundation.
+ * *
+ * * This program is distributed in the hope that it will be useful,
+ * * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * * GNU General Public License and the GNU Lesser General Public License
+ * * for more details.
+ * *
+ * * You should have received a copy of the GNU General Public License
+ * * and the GNU Lesser General Public License along with this program;
+ * * if not, write to the Free Software Foundation, Inc.,
+ * * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ */
+
+package org.rhq.cassandra.schema;
+
+import java.io.BufferedReader;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.net.JarURLConnection;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.Enumeration;
+import java.util.List;
+import java.util.jar.JarEntry;
+import java.util.jar.JarFile;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+/**
+ * @author Stefan Negrea
+ */
+class UpdateFolder {
+
+ private final Log log = LogFactory.getLog(UpdateFolder.class);
+
+ private final String folder;
+ private final List<UpdateFile> updateFiles;
+
+ public UpdateFolder(String folder) throws Exception {
+ this.folder = folder;
+ this.updateFiles = this.loadUpdateFiles();
+ }
+
+ /**
+ * Removes all the update files up to and including the provided version.
+ *
+ * @param currentVersion current version
+ */
+ public void removeAppliedUpdates(int currentVersion) {
+ List<UpdateFile> updateFiles = this.getUpdateFiles();
+ while (!updateFiles.isEmpty()) {
+ int version = updateFiles.get(0).extractVersion();
+ if (version <= currentVersion) {
+ updateFiles.remove(0);
+ } else {
+ break;
+ }
+ }
+ }
+
+ /**
+ * Return the list of available update files.
+ *
+ * @return list of update files
+ */
+ public List<UpdateFile> getUpdateFiles() {
+ return this.updateFiles;
+ }
+
+ /**
+ * Loads the initial set of update files based on the input folder.
+ *
+ * @return list of update files
+ * @throws Exception
+ */
+ private List<UpdateFile> loadUpdateFiles() throws Exception {
+ List<UpdateFile> files = new ArrayList<UpdateFile>();
+ InputStream stream = null;
+
+ try {
+ URL resourceFolderURL = this.getClass().getClassLoader().getResource(folder);
+
+ if (resourceFolderURL.getProtocol().equals("file")) {
+ stream = this.getClass().getClassLoader().getResourceAsStream(folder);
+ BufferedReader reader = new BufferedReader(new InputStreamReader(stream));
+
+ String updateFile;
+ while ((updateFile = reader.readLine()) != null) {
+ files.add(new UpdateFile(folder + updateFile));
+ }
+ } else if (resourceFolderURL.getProtocol().equals("jar")) {
+ URL jarURL = this.getClass().getClassLoader().getResources(folder).nextElement();
+ JarURLConnection jarURLCon = (JarURLConnection) (jarURL.openConnection());
+ JarFile jarFile = jarURLCon.getJarFile();
+ Enumeration<JarEntry> entries = jarFile.entries();
+ while (entries.hasMoreElements()) {
+ String entry = entries.nextElement().getName();
+ if (entry.startsWith(folder) && !entry.equals(folder) && !entry.equals(folder + "/")) {
+ files.add(new UpdateFile(entry));
+ }
+ }
+ }
+
+ Collections.sort(files, new Comparator<UpdateFile>() {
+ @Override
+ public int compare(UpdateFile o1, UpdateFile o2) {
+ return o1.compareTo(o2);
+ }
+ });
+ } catch (Exception e) {
+ log.error("Error reading the list of update files.", e);
+ throw e;
+ } finally {
+ if (stream != null) {
+ try{
+ stream.close();
+ } catch (Exception e) {
+ log.error("Error closing the stream with the list of update files.", e);
+ throw e;
+ }
+ }
+ }
+
+ return files;
+ }
+}
diff --git a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/VersionManager.java b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/VersionManager.java
index e2daa2f..794e991 100644
--- a/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/VersionManager.java
+++ b/modules/common/cassandra-schema/src/main/java/org/rhq/cassandra/schema/VersionManager.java
@@ -25,14 +25,11 @@
package org.rhq.cassandra.schema;
-import java.util.Date;
import java.util.List;
+import java.util.Properties;
import java.util.UUID;
-import com.datastax.driver.core.BoundStatement;
-import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.exceptions.AuthenticationException;
-import com.datastax.driver.core.exceptions.NoHostAvailableException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -42,10 +39,9 @@ import org.rhq.core.domain.cloud.StorageNode;
/**
* @author Stefan Negrea
*/
-public class VersionManager extends AbstractManager {
+class VersionManager extends AbstractManager {
private static final String SCHEMA_BASE_FOLDER = "schema";
- private static final String INSERT_VERSION_QUERY = "INSERT INTO rhq.schema_version (version, time ) VALUES ( ?, ?);";
private final Log log = LogFactory.getLog(VersionManager.class);
@@ -65,125 +61,170 @@ public class VersionManager extends AbstractManager {
}
}
- public VersionManager(String username, String password, List<StorageNode> nodes) {
+ public VersionManager(String username, String password, List<StorageNode> nodes) throws Exception {
super(username, password, nodes);
}
+ /**
+ * Install and update the RHQ schema:
+ * 1) If the schema does not exist then attempt to create it and then run the updates in order.
+ * 2) If the schema exists then run the updates in order.
+ *
+ * @throws Exception
+ */
public void install() throws Exception {
log.info("Preparing to install schema");
+
+ boolean clusterSessionInitialized = false;
try {
- initCluster();
+ initClusterSession();
+ clusterSessionInitialized = true;
} catch (AuthenticationException e) {
- // If we cannot connect with the rhqadmin user, then assume it has not been
- // created; so, we need to perform the "bootstrap" step of creating the user
- // before we apply any schema changes. We want to create the user first so that
- // we can go ahead and remove the default cassandra user and apply all changes
- // using the rhqadmin user.
- bootstrap();
+ log.debug("Authentication exception. Will now attempt to create the schema.");
+ log.debug(e);
+ } finally {
+ shutdownClusterConnection();
}
+ if (!clusterSessionInitialized) {
+ create();
+ }
+
+ update();
+ }
+
+ /**
+ * Create RHQ schema and make related updates to the Cassandra installation.
+ *
+ * @throws Exception
+ */
+ private void create() throws Exception {
+ UpdateFolder updateFolder = new UpdateFolder(Task.Create.getFolder());
+
+ Properties properties = new Properties(System.getProperties());
+ properties.put("replication_factor", calculateNewReplicationFactor() + "");
+ properties.put("cassandra_user_password", UUID.randomUUID() + "");
+ properties.put("rhq_admin_username", getUsername());
+ properties.put("rhq_admin_password", getPassword());
+
+ /**
+ * NOTE: Before applying any schema, we need to create the rhqadmin user. If we have more
+ * than a single node cluster then we also need to set the RF of the system_auth
+ * keyspace BEFORE we create the rhqadmin user. If we do not do it in this order we will
+ * get inconsistent reads which can result in failed authentication.
+ */
+ //1. Execute the creation of RHQ schema, version table, admin user.
try {
- initCluster();
+ initClusterSession(DEFAULT_CASSANDRA_USER, DEFAULT_CASSANDRA_PASSWORD);
if (!schemaExists()) {
- session.execute("ALTER USER cassandra NOSUPERUSER");
- session.execute("ALTER USER cassandra WITH PASSWORD '" + UUID.randomUUID() + "'");
- this.executeTask(Task.Create);
- } else {
+ execute(updateFolder.getUpdateFiles().get(0), properties);
+ } else {
log.info("RHQ schema already exists.");
}
- this.executeTask(Task.Update);
+ } catch (Exception ex) {
+ log.error(ex);
+ throw new RuntimeException(ex);
+ } finally {
+ shutdownClusterConnection();
+ }
+
+ //2. Change Cassandra default user privileges and password.
+ try {
+ initClusterSession();
+ execute(updateFolder.getUpdateFiles().get(1), properties);
} finally {
- shutdown();
+ shutdownClusterConnection();
}
}
/**
- * Before applying any schema, we need to create the rhqadmin user. If we have more
- * than a single node cluster then we also need to set the RF of the system_auth
- * keyspace BEFORE we create the rhqadmin user. If we do not do in this order we will
- * get inconsistent reads which will can result in failed authentication.
+ * Update existing schema to the most current version in the update folder.
+ *
+ * @throws Exception
*/
- public void bootstrap() {
+ private void update() throws Exception {
try {
- initCluster("cassandra", "cassandra");
+ initClusterSession();
- int replicationFactor;
- if (nodes.size() < 3) {
- replicationFactor = nodes.size();
- } else if (nodes.size() < 4) {
- replicationFactor = 2;
- } else {
- replicationFactor = 3;
+ if (!schemaExists()) {
+ log.error("Schema not installed.");
+ throw new RuntimeException("Schema not installed properly, cannot apply schema updates.");
}
- log.info("Updating replication_factor of system_auth keyspace to " + replicationFactor);
- session.execute("ALTER KEYSPACE system_auth WITH replication = {'class': 'SimpleStrategy', " +
- "'replication_factor': " + replicationFactor + "}");
- log.info("Creating rhqadmin user");
- session.execute("CREATE USER rhqadmin WITH PASSWORD 'rhqadmin' SUPERUSER");
+ UpdateFolder updateFolder = new UpdateFolder(Task.Update.getFolder());
+
+ int currentSchemaVersion = getSchemaVersion();
+ log.info("Current schema version is " + currentSchemaVersion);
+ updateFolder.removeAppliedUpdates(currentSchemaVersion);
+
+ if (updateFolder.getUpdateFiles().size() == 0) {
+ log.info("RHQ schema is current! No updates applied.");
+ } else {
+ for (UpdateFile updateFile : updateFolder.getUpdateFiles()) {
+ execute(updateFile);
+
+ Properties versionProperties = new Properties();
+ versionProperties.put("version", updateFile.extractVersion() + "");
+ versionProperties.put("time", System.currentTimeMillis() + "");
+ executeManagementQuery(Query.INSERT_SCHEMA_VERSION, versionProperties);
+
+ log.info("RHQ schema update " + updateFile +" applied.");
+ }
+ }
} finally {
- shutdown();
+ shutdownClusterConnection();
}
}
+ /**
+ * Drop RHQ schema and revert the database to pre-RHQ state:
+ * 1) Reinstate Cassandra superuser
+ * 2) Drop RHQ schema
+ * 3) Drop RHQ user
+ *
+ * @throws Exception
+ */
public void drop() throws Exception {
log.info("Preparing to drop RHQ schema");
- try {
- initCluster();
- if (schemaExists()) {
- this.executeTask(Task.Drop);
- } else {
- log.info("RHQ schema does not exist. Drop operation not required.");
- }
- } catch (NoHostAvailableException e) {
+ UpdateFolder updateFolder = new UpdateFolder(Task.Drop.getFolder());
+ Properties properties = new Properties(System.getProperties());
+ properties.put("rhq_admin_username", getUsername());
+
+ try{
+ initClusterSession();
+ //1. Reinstate Cassandra superuser
+ execute(updateFolder.getUpdateFiles().get(0), properties);
+ log.info("Cassandra user reverted to default configuration.");
+ } catch (Exception e) {
throw new RuntimeException(e);
} finally {
- shutdown();
+ shutdownClusterConnection();
}
- }
- private void executeTask(Task task) {
try {
- log.info("Starting to execute " + task + " task.");
-
- List<String> updateFiles = this.getUpdateFiles(task.getFolder());
+ //Use Cassandra superuser to drop RHQ schema and user
+ initClusterSession(DEFAULT_CASSANDRA_USER, DEFAULT_CASSANDRA_PASSWORD);
- if (Task.Update.equals(task)) {
- int currentSchemaVersion = this.getSchemaVersion();
- log.info("Current schema version is " + currentSchemaVersion);
- this.removeAppliedUpdates(updateFiles, currentSchemaVersion);
- }
-
- if (updateFiles.size() == 0 && Task.Update.equals(task)) {
- log.info("RHQ schema is current! No updates applied.");
+ if (schemaExists()) {
+ //2. Drop RHQ schema
+ execute(updateFolder.getUpdateFiles().get(1), properties);
+ log.info("RHQ schema dropped.");
+ } else {
+ log.info("RHQ schema does not exist. Drop operation not required.");
}
- for (String updateFile : updateFiles) {
- log.info("Applying file " + updateFile + " for " + task + " task.");
- for (String step : getSteps(updateFile)) {
- log.info("Statement: \n" + step);
- session.execute(step);
- }
-
- if (Task.Update.equals(task)) {
- this.updateSchemaVersion(updateFile);
- }
-
- log.info("File " + updateFile + " applied for " + task + " task.");
+ if (userExists()) {
+ //3. Drop RHQ user
+ execute(updateFolder.getUpdateFiles().get(2), properties);
+ log.info("RHQ admin user dropped.");
+ } else {
+ log.info("RHQ admin user does not exist. Drop operation not required.");
}
} catch (Exception e) {
- log.error(e);
throw new RuntimeException(e);
+ } finally {
+ shutdownClusterConnection();
}
-
- log.info("Successfully executed " + task + " task.");
- }
-
- private void updateSchemaVersion(String updateFileName) {
- PreparedStatement preparedStatement = session.prepare(INSERT_VERSION_QUERY);
- BoundStatement boundStatement = preparedStatement.bind(this.extractVersionFromUpdateFile(updateFileName),
- new Date());
- session.execute(boundStatement);
}
}
diff --git a/modules/common/cassandra-schema/src/main/resources/management/0001.xml b/modules/common/cassandra-schema/src/main/resources/management/0001.xml
new file mode 100644
index 0000000..9fa4a16
--- /dev/null
+++ b/modules/common/cassandra-schema/src/main/resources/management/0001.xml
@@ -0,0 +1,25 @@
+<updatePlan>
+ <step name="user_exists">
+ SELECT * FROM system_auth.users WHERE name = '%username%'
+ </step>
+
+ <step name="schema_exists">
+ SELECT * FROM system.schema_keyspaces WHERE keyspace_name = 'rhq'
+ </step>
+
+ <step name="version_columnfamily_exists">
+ SELECT * from system.schema_columnfamilies WHERE keyspace_name='rhq' AND columnfamily_name='schema_version'
+ </step>
+
+ <step name="version">
+ SELECT version FROM rhq.schema_version
+ </step>
+
+ <step name="replication_factor">
+ SELECT strategy_options FROM system.schema_keyspaces where keyspace_name='rhq'
+ </step>
+
+ <step name="insert_schema_version">
+ INSERT INTO rhq.schema_version (version, time ) VALUES ( %version%, %time%)
+ </step>
+</updatePlan>
\ No newline at end of file
diff --git a/modules/common/cassandra-schema/src/main/resources/schema/create/0001.xml b/modules/common/cassandra-schema/src/main/resources/schema/create/0001.xml
index 7a8d901..3f2db38 100644
--- a/modules/common/cassandra-schema/src/main/resources/schema/create/0001.xml
+++ b/modules/common/cassandra-schema/src/main/resources/schema/create/0001.xml
@@ -1,15 +1,22 @@
<updatePlan>
<step>
- CREATE KEYSPACE rhq WITH
- replication = {'class': 'SimpleStrategy', 'replication_factor': 1};
+ ALTER KEYSPACE system_auth WITH replication = {'class': 'SimpleStrategy', 'replication_factor': %replication_factor% }
</step>
<step>
- GRANT ALL PERMISSIONS ON KEYSPACE system_auth to rhqadmin;
+ CREATE USER %rhq_admin_username% WITH PASSWORD '%rhq_admin_password%' SUPERUSER
</step>
<step>
- GRANT ALL PERMISSIONS ON KEYSPACE rhq to rhqadmin;
+ CREATE KEYSPACE rhq WITH replication = {'class': 'SimpleStrategy', 'replication_factor': %replication_factor% }
+ </step>
+
+ <step>
+ GRANT ALL PERMISSIONS ON KEYSPACE system_auth to %rhq_admin_username%
+ </step>
+
+ <step>
+ GRANT ALL PERMISSIONS ON KEYSPACE rhq to %rhq_admin_username%
</step>
<step>
@@ -17,6 +24,6 @@
version int,
time timestamp,
PRIMARY KEY (version, time)
- ) WITH COMPACT STORAGE;
+ ) WITH COMPACT STORAGE
</step>
</updatePlan>
\ No newline at end of file
diff --git a/modules/common/cassandra-schema/src/main/resources/schema/create/0002.xml b/modules/common/cassandra-schema/src/main/resources/schema/create/0002.xml
new file mode 100644
index 0000000..417c9fa
--- /dev/null
+++ b/modules/common/cassandra-schema/src/main/resources/schema/create/0002.xml
@@ -0,0 +1,9 @@
+<updatePlan>
+ <step>
+ ALTER USER cassandra NOSUPERUSER
+ </step>
+
+ <step>
+ ALTER USER cassandra WITH PASSWORD '%cassandra_user_password%'
+ </step>
+</updatePlan>
\ No newline at end of file
diff --git a/modules/common/cassandra-schema/src/main/resources/schema/drop/0001.xml b/modules/common/cassandra-schema/src/main/resources/schema/drop/0001.xml
index b826965..425fdb8 100644
--- a/modules/common/cassandra-schema/src/main/resources/schema/drop/0001.xml
+++ b/modules/common/cassandra-schema/src/main/resources/schema/drop/0001.xml
@@ -1,10 +1,9 @@
<updatePlan>
<step>
- DROP KEYSPACE rhq;
+ ALTER USER cassandra SUPERUSER
</step>
<step>
- DROP USER rhqadmin;
+ ALTER USER cassandra WITH PASSWORD 'cassandra'
</step>
-
</updatePlan>
\ No newline at end of file
diff --git a/modules/common/cassandra-schema/src/main/resources/schema/drop/0002.xml b/modules/common/cassandra-schema/src/main/resources/schema/drop/0002.xml
new file mode 100644
index 0000000..fa7913a
--- /dev/null
+++ b/modules/common/cassandra-schema/src/main/resources/schema/drop/0002.xml
@@ -0,0 +1,5 @@
+<updatePlan>
+ <step>
+ DROP KEYSPACE rhq
+ </step>
+</updatePlan>
\ No newline at end of file
diff --git a/modules/common/cassandra-schema/src/main/resources/schema/drop/0003.xml b/modules/common/cassandra-schema/src/main/resources/schema/drop/0003.xml
new file mode 100644
index 0000000..1147fce
--- /dev/null
+++ b/modules/common/cassandra-schema/src/main/resources/schema/drop/0003.xml
@@ -0,0 +1,5 @@
+<updatePlan>
+ <step>
+ DROP USER %rhq_admin_username%
+ </step>
+</updatePlan>
\ No newline at end of file
diff --git a/modules/common/cassandra-schema/src/main/resources/topology/0001.xml b/modules/common/cassandra-schema/src/main/resources/topology/0001.xml
new file mode 100644
index 0000000..d65fc11
--- /dev/null
+++ b/modules/common/cassandra-schema/src/main/resources/topology/0001.xml
@@ -0,0 +1,8 @@
+<updatePlan>
+ <step>
+ ALTER KEYSPACE rhq WITH replication = {'class': 'SimpleStrategy', 'replication_factor': %replication_factor%}
+ </step>
+ <step>
+ ALTER KEYSPACE system_auth WITH replication = {'class': 'SimpleStrategy', 'replication_factor': %replication_factor%}
+ </step>
+</updatePlan>
\ No newline at end of file
diff --git a/modules/common/cassandra-schema/src/main/resources/topology/0002.xml b/modules/common/cassandra-schema/src/main/resources/topology/0002.xml
new file mode 100644
index 0000000..24f2c0e
--- /dev/null
+++ b/modules/common/cassandra-schema/src/main/resources/topology/0002.xml
@@ -0,0 +1,25 @@
+<updatePlan>
+ <step>
+ ALTER COLUMNFAMILY rhq.metrics_index WITH gc_grace_seconds = %gc_grace_seconds%
+ </step>
+
+ <step>
+ ALTER COLUMNFAMILY rhq.raw_metrics WITH gc_grace_seconds = %gc_grace_seconds%
+ </step>
+
+ <step>
+ ALTER COLUMNFAMILY rhq.one_hour_metrics WITH gc_grace_seconds = %gc_grace_seconds%
+ </step>
+
+ <step>
+ ALTER COLUMNFAMILY rhq.six_hour_metrics WITH gc_grace_seconds = %gc_grace_seconds%
+ </step>
+
+ <step>
+ ALTER COLUMNFAMILY rhq.twenty_four_hour_metrics WITH gc_grace_seconds = %gc_grace_seconds%
+ </step>
+
+ <step>
+ ALTER COLUMNFAMILY rhq.schema_version WITH gc_grace_seconds = %gc_grace_seconds%
+ </step>
+</updatePlan>
\ No newline at end of file
diff --git a/modules/common/cassandra-schema/src/main/resources/topology/create/0001.xml b/modules/common/cassandra-schema/src/main/resources/topology/create/0001.xml
deleted file mode 100644
index 5cbd7eb..0000000
--- a/modules/common/cassandra-schema/src/main/resources/topology/create/0001.xml
+++ /dev/null
@@ -1,5 +0,0 @@
-<updatePlan>
- <step>
- ALTER KEYSPACE rhq WITH replication = {'class': 'SimpleStrategy', 'replication_factor': %s};
- </step>
-</updatePlan>
\ No newline at end of file
diff --git a/modules/common/cassandra-schema/src/main/resources/topology/create/0002.xml b/modules/common/cassandra-schema/src/main/resources/topology/create/0002.xml
deleted file mode 100644
index d631030..0000000
--- a/modules/common/cassandra-schema/src/main/resources/topology/create/0002.xml
+++ /dev/null
@@ -1,26 +0,0 @@
-<updatePlan>
- <step>
- ALTER COLUMNFAMILY rhq.metrics_index WITH gc_grace_seconds = %s;
- </step>
-
- <step>
- ALTER COLUMNFAMILY rhq.raw_metrics WITH gc_grace_seconds = %s;
- </step>
-
- <step>
- ALTER COLUMNFAMILY rhq.one_hour_metrics WITH gc_grace_seconds = %s;
- </step>
-
- <step>
- ALTER COLUMNFAMILY rhq.six_hour_metrics WITH gc_grace_seconds = %s;
- </step>
-
- <step>
- ALTER COLUMNFAMILY rhq.twenty_four_hour_metrics WITH gc_grace_seconds = %s;
- </step>
-
- <step>
- ALTER COLUMNFAMILY rhq.schema_version WITH gc_grace_seconds = %s;
- </step>
-
-</updatePlan>
\ No newline at end of file
diff --git a/modules/common/cassandra-schema/src/main/resources/topology/update/0001.xml b/modules/common/cassandra-schema/src/main/resources/topology/update/0001.xml
deleted file mode 100644
index f2c0e57..0000000
--- a/modules/common/cassandra-schema/src/main/resources/topology/update/0001.xml
+++ /dev/null
@@ -1,9 +0,0 @@
-<updatePlan>
- <step>
- ALTER KEYSPACE system_auth WITH replication = {'class': 'SimpleStrategy', 'replication_factor': %s};
- </step>
-
- <step>
- ALTER KEYSPACE rhq WITH replication = {'class': 'SimpleStrategy', 'replication_factor': %s};
- </step>
-</updatePlan>
\ No newline at end of file
diff --git a/modules/common/cassandra-schema/src/main/resources/topology/update/0002.xml b/modules/common/cassandra-schema/src/main/resources/topology/update/0002.xml
deleted file mode 100644
index d631030..0000000
--- a/modules/common/cassandra-schema/src/main/resources/topology/update/0002.xml
+++ /dev/null
@@ -1,26 +0,0 @@
-<updatePlan>
- <step>
- ALTER COLUMNFAMILY rhq.metrics_index WITH gc_grace_seconds = %s;
- </step>
-
- <step>
- ALTER COLUMNFAMILY rhq.raw_metrics WITH gc_grace_seconds = %s;
- </step>
-
- <step>
- ALTER COLUMNFAMILY rhq.one_hour_metrics WITH gc_grace_seconds = %s;
- </step>
-
- <step>
- ALTER COLUMNFAMILY rhq.six_hour_metrics WITH gc_grace_seconds = %s;
- </step>
-
- <step>
- ALTER COLUMNFAMILY rhq.twenty_four_hour_metrics WITH gc_grace_seconds = %s;
- </step>
-
- <step>
- ALTER COLUMNFAMILY rhq.schema_version WITH gc_grace_seconds = %s;
- </step>
-
-</updatePlan>
\ No newline at end of file
diff --git a/modules/common/cassandra-schema/src/test/java/org/rhq/cassandra/schema/UpdateFileTest.java b/modules/common/cassandra-schema/src/test/java/org/rhq/cassandra/schema/UpdateFileTest.java
new file mode 100644
index 0000000..6034671
--- /dev/null
+++ b/modules/common/cassandra-schema/src/test/java/org/rhq/cassandra/schema/UpdateFileTest.java
@@ -0,0 +1,170 @@
+package org.rhq.cassandra.schema;
+
+import java.util.List;
+import java.util.Properties;
+import java.util.Random;
+
+import org.testng.Assert;
+import org.testng.annotations.Test;
+
+@Test
+public class UpdateFileTest {
+
+ public void noBindingOrdered() {
+ UpdateFile updateFile = new UpdateFile("no_binding.xml");
+ List<String> orderedSteps = updateFile.getOrderedSteps();
+ Assert.assertEquals(orderedSteps.size(), 4);
+
+ for (int index = 0; index < orderedSteps.size(); index++) {
+ Assert.assertEquals(Integer.parseInt(orderedSteps.get(index)), index);
+ }
+ }
+
+ public void noBindingNamedSteps() {
+ UpdateFile updateFile = new UpdateFile("no_binding_named_steps.xml");
+ List<String> orderedSteps = updateFile.getOrderedSteps();
+ Assert.assertEquals(orderedSteps.size(), 4);
+
+ for (int index = 0; index < orderedSteps.size(); index++) {
+ String step = updateFile.getNamedStep(index + "");
+ Assert.assertEquals(Integer.parseInt(step), index);
+ }
+ }
+
+ public void noBindingOrderedExtraTags() {
+ UpdateFile updateFile = new UpdateFile("no_binding.xml");
+ List<String> orderedSteps = updateFile.getOrderedSteps();
+ Assert.assertEquals(orderedSteps.size(), 4);
+ }
+
+ public void noBindingOrderedWithUnrelatedProperties() throws Exception {
+ Properties testProperties = new Properties();
+ testProperties.put("random_property_2", "12345");
+ testProperties.put("random_property_1", "67890");
+
+ UpdateFile updateFile = new UpdateFile("no_binding.xml");
+ List<String> orderedSteps = updateFile.getOrderedSteps();
+ Assert.assertEquals(orderedSteps.size(), 4);
+
+ for (int index = 0; index < orderedSteps.size(); index++) {
+ Assert.assertEquals(Integer.parseInt(orderedSteps.get(index)), index);
+ }
+ }
+
+ @Test(expectedExceptions = RuntimeException.class)
+ public void bindingErrorNoProperties() throws Exception {
+ UpdateFile updateFile = new UpdateFile("required_binding.xml");
+ updateFile.getOrderedSteps();
+ }
+
+ @Test(expectedExceptions = RuntimeException.class)
+ public void bindingErrorPartialProperties() throws Exception {
+ Properties testProperties = new Properties();
+ testProperties.put("first_property", "0");
+ testProperties.put("second_property", "1");
+
+ UpdateFile updateFile = new UpdateFile("required_binding.xml");
+ updateFile.getOrderedSteps(testProperties);
+ }
+
+ @Test(expectedExceptions = RuntimeException.class)
+ public void badFileNoUpdatePlan() {
+ UpdateFile updateFile = new UpdateFile("bad_file_1.xml");
+ updateFile.getOrderedSteps();
+ }
+
+ public void noUpdateSteps() {
+ UpdateFile updateFile = new UpdateFile("bad_file_2.xml");
+ updateFile.getOrderedSteps();
+ }
+
+ @Test(expectedExceptions = RuntimeException.class)
+ public void badFileBadXML() {
+ UpdateFile updateFile = new UpdateFile("bad_file_3.xml");
+ updateFile.getOrderedSteps();
+ }
+
+ public void binding() {
+ Random random = new Random();
+ double randomNumber = random.nextDouble() * random.nextInt();
+
+ Properties testProperties = new Properties();
+ testProperties.put("first_property", "0");
+ testProperties.put("second_property", "1");
+ testProperties.put("third_property", "2");
+ testProperties.put("fourth_property", "3");
+ testProperties.put("fifth_property", randomNumber + "");
+
+ UpdateFile updateFile = new UpdateFile("required_binding.xml");
+ List<String> orderedSteps = updateFile.getOrderedSteps(testProperties);
+ Assert.assertEquals(orderedSteps.size(), 4);
+
+ for (int index = 0; index < orderedSteps.size(); index++) {
+ if (index % 2 == 0) {
+ Assert.assertEquals(orderedSteps.get(index), index + "" + randomNumber);
+ } else {
+ Assert.assertEquals(orderedSteps.get(index), index + " testString " + randomNumber + " testString "
+ + randomNumber);
+ }
+ }
+ }
+
+ public void bindingNamedSteps() {
+ Random random = new Random();
+ double randomNumber = random.nextDouble() * random.nextInt();
+
+ Properties testProperties = new Properties();
+ testProperties.put("first_property", "0");
+ testProperties.put("second_property", "1");
+ testProperties.put("third_property", "2");
+ testProperties.put("fourth_property", "3");
+ testProperties.put("fifth_property", randomNumber + "");
+
+ UpdateFile updateFile = new UpdateFile("required_binding_named_steps.xml");
+ List<String> orderedSteps = updateFile.getOrderedSteps(testProperties);
+ Assert.assertEquals(orderedSteps.size(), 4);
+
+ for (int index = 0; index < orderedSteps.size(); index++) {
+ String step = updateFile.getNamedStep(index + "", testProperties);
+ if (index % 2 == 0) {
+ Assert.assertEquals(step, index + "" + randomNumber);
+ } else {
+ Assert.assertEquals(step, index + " testString " + randomNumber + " testString " + randomNumber);
+ }
+ }
+ }
+
+ public void bindingNamedStepPartialProperties() {
+ Random random = new Random();
+ double randomNumber = random.nextDouble() * random.nextInt();
+
+ Properties testProperties = new Properties();
+ testProperties.put("second_property", "1");
+ testProperties.put("fifth_property", randomNumber + "");
+
+ UpdateFile updateFile = new UpdateFile("required_binding_named_steps.xml");
+ String step = updateFile.getNamedStep("1", testProperties);
+ Assert.assertEquals(step, 1 + " testString " + randomNumber + " testString " + randomNumber);
+ }
+
+ @Test(expectedExceptions = RuntimeException.class)
+ public void bindingNamedStepWrongPartialProperties() {
+ Random random = new Random();
+ double randomNumber = random.nextDouble() * random.nextInt();
+
+ Properties testProperties = new Properties();
+ testProperties.put("first_property", "0");
+ //second_property is actually needed and not first_property
+ testProperties.put("fifth_property", randomNumber + "");
+
+ UpdateFile updateFile = new UpdateFile("required_binding_named_steps.xml");
+ String step = updateFile.getNamedStep("1", testProperties);
+ Assert.assertEquals(step, 1 + " testString " + randomNumber + " testString " + randomNumber);
+ }
+
+ public void bindingNamedStepNotFound() {
+ UpdateFile updateFile = new UpdateFile("required_binding_named_steps.xml");
+ String step = updateFile.getNamedStep("randomName");
+ Assert.assertNull(step);
+ }
+}
\ No newline at end of file
diff --git a/modules/common/cassandra-schema/src/test/resources/bad_file_1.xml b/modules/common/cassandra-schema/src/test/resources/bad_file_1.xml
new file mode 100644
index 0000000..691bf0d
--- /dev/null
+++ b/modules/common/cassandra-schema/src/test/resources/bad_file_1.xml
@@ -0,0 +1,9 @@
+<updatePlana>
+ <step>%first_property%%fifth_property%</step>
+
+ <step>%second_property%%fifth_property%%fifth_property%</step>
+
+ <step>%third_property%%fifth_property%</step>
+
+ <step>%fourth_property%%fifth_property%%fifth_property%</step>
+</updatePlana>
\ No newline at end of file
diff --git a/modules/common/cassandra-schema/src/test/resources/bad_file_2.xml b/modules/common/cassandra-schema/src/test/resources/bad_file_2.xml
new file mode 100644
index 0000000..3ef99b8
--- /dev/null
+++ b/modules/common/cassandra-schema/src/test/resources/bad_file_2.xml
@@ -0,0 +1,9 @@
+<updatePlan>
+ <steps>%first_property%%fifth_property%</steps>
+
+ <steps>%second_property%%fifth_property%%fifth_property%</steps>
+
+ <steps>%third_property%%fifth_property%</steps>
+
+ <steps>%fourth_property%%fifth_property%%fifth_property%</steps>
+</updatePlan>
\ No newline at end of file
diff --git a/modules/common/cassandra-schema/src/test/resources/bad_file_3.xml b/modules/common/cassandra-schema/src/test/resources/bad_file_3.xml
new file mode 100644
index 0000000..4461d6a
--- /dev/null
+++ b/modules/common/cassandra-schema/src/test/resources/bad_file_3.xml
@@ -0,0 +1,9 @@
+<updatePlan>
+ <steps>%first_property%%fifth_property%</steps>
+
+ <steps>%second_property%%fifth_property%%fifth_property%</steps>
+
+ <random>%third_property%%fifth_property%</steps>
+
+ <steps>%fourth_property%%fifth_property%%fifth_property%</steps>
+</updatePlan>
\ No newline at end of file
diff --git a/modules/common/cassandra-schema/src/test/resources/no_binding.xml b/modules/common/cassandra-schema/src/test/resources/no_binding.xml
new file mode 100644
index 0000000..ad3fbb7
--- /dev/null
+++ b/modules/common/cassandra-schema/src/test/resources/no_binding.xml
@@ -0,0 +1,9 @@
+<updatePlan>
+ <step>0</step>
+
+ <step>1</step>
+
+ <step>2</step>
+
+ <step>3</step>
+</updatePlan>
\ No newline at end of file
diff --git a/modules/common/cassandra-schema/src/test/resources/no_binding_extra_tags.xml b/modules/common/cassandra-schema/src/test/resources/no_binding_extra_tags.xml
new file mode 100644
index 0000000..88cb24a
--- /dev/null
+++ b/modules/common/cassandra-schema/src/test/resources/no_binding_extra_tags.xml
@@ -0,0 +1,12 @@
+<updatePlan>
+ <step>0</step>
+
+ <step>1</step>
+
+ <step>2</step>
+
+ <step>3</step>
+
+ <steps>3234</steps>
+ <what>??</what>
+</updatePlan>
\ No newline at end of file
diff --git a/modules/common/cassandra-schema/src/test/resources/no_binding_named_steps.xml b/modules/common/cassandra-schema/src/test/resources/no_binding_named_steps.xml
new file mode 100644
index 0000000..843ad0f
--- /dev/null
+++ b/modules/common/cassandra-schema/src/test/resources/no_binding_named_steps.xml
@@ -0,0 +1,9 @@
+<updatePlan>
+ <step name="0">0</step>
+
+ <step name="1">1</step>
+
+ <step name="2">2</step>
+
+ <step name="3">3</step>
+</updatePlan>
\ No newline at end of file
diff --git a/modules/common/cassandra-schema/src/test/resources/required_binding.xml b/modules/common/cassandra-schema/src/test/resources/required_binding.xml
new file mode 100644
index 0000000..7332a99
--- /dev/null
+++ b/modules/common/cassandra-schema/src/test/resources/required_binding.xml
@@ -0,0 +1,9 @@
+<updatePlan>
+ <step>%first_property%%fifth_property%</step>
+
+ <step>%second_property% testString %fifth_property% testString %fifth_property%</step>
+
+ <step>%third_property%%fifth_property%</step>
+
+ <step>%fourth_property% testString %fifth_property% testString %fifth_property%</step>
+</updatePlan>
\ No newline at end of file
diff --git a/modules/common/cassandra-schema/src/test/resources/required_binding_named_steps.xml b/modules/common/cassandra-schema/src/test/resources/required_binding_named_steps.xml
new file mode 100644
index 0000000..f50e7a5
--- /dev/null
+++ b/modules/common/cassandra-schema/src/test/resources/required_binding_named_steps.xml
@@ -0,0 +1,9 @@
+<updatePlan>
+ <step name="0">%first_property%%fifth_property%</step>
+
+ <step name="1">%second_property% testString %fifth_property% testString %fifth_property%</step>
+
+ <step name="2">%third_property%%fifth_property%</step>
+
+ <step name="3">%fourth_property% testString %fifth_property% testString %fifth_property%</step>
+</updatePlan>
\ No newline at end of file
diff --git a/modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/InstallerServiceImpl.java b/modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/InstallerServiceImpl.java
index 160b5dd..5c8002a 100644
--- a/modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/InstallerServiceImpl.java
+++ b/modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/InstallerServiceImpl.java
@@ -487,7 +487,7 @@ public class InstallerServiceImpl implements InstallerService {
}
log("Install RHQ schema along with updates to Cassandra.");
storageNodeSchemaManager.install();
- storageNodeSchemaManager.updateTopology(true);
+ storageNodeSchemaManager.updateTopology();
} else {
log("Ignoring Cassandra schema - installer will assume it exists and is already up-to-date.");
}
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
index bf25daf..34e5ebd 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
@@ -968,17 +968,15 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
operationManager.scheduleGroupOperation(subjectManager.getOverlord(), schedule);
}
- private boolean updateTopology(List<StorageNode> storageNodes) {
+ private void updateTopology(List<StorageNode> storageNodes) {
String username = getRequiredStorageProperty(USERNAME_PROPERTY);
String password = getRequiredStorageProperty(PASSWORD_PROPERTY);
SchemaManager schemaManager = new SchemaManager(username, password, storageNodes);
try{
- return schemaManager.updateTopology(false);
+ schemaManager.updateTopology();
} catch (Exception e) {
log.error("An error occurred while applying schema topology changes", e);
}
-
- return false;
}
private String getRequiredStorageProperty(String property) {
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/scheduler/jobs/StorageNodeMaintenanceJob.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/scheduler/jobs/StorageNodeMaintenanceJob.java
index 54ca4c2..aa55cb4 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/scheduler/jobs/StorageNodeMaintenanceJob.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/scheduler/jobs/StorageNodeMaintenanceJob.java
@@ -135,17 +135,15 @@ public class StorageNodeMaintenanceJob extends AbstractStatefulJob {
runNodeMaintenance(seedList, isReadRepairNeeded);
}
- private boolean updateTopology(List<StorageNode> storageNodes) throws JobExecutionException {
+ private void updateTopology(List<StorageNode> storageNodes) throws JobExecutionException {
String username = getRequiredStorageProperty(USERNAME_PROP);
String password = getRequiredStorageProperty(PASSWORD_PROP);
SchemaManager schemaManager = new SchemaManager(username, password, storageNodes);
try{
- return schemaManager.updateTopology(false);
+ schemaManager.updateTopology();
} catch (Exception e) {
log.error("An error occurred while applying schema topology changes", e);
}
-
- return false;
}
private List<StorageNode> waitForClustering(List<StorageNode> storageNodes) {
diff --git a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java
index bd171a4..aafa481 100644
--- a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java
+++ b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java
@@ -154,7 +154,7 @@ public class StorageNodeComponentITest {
SchemaManager schemaManager = new SchemaManager("rhqadmin", "rhqadmin", "127.0.0.1|7399|9142");
schemaManager.install();
- schemaManager.updateTopology(true);
+ schemaManager.updateTopology();
}
private ProcessExecution getProcessExecution(File binDir) {
10 years, 9 months
[rhq] modules/enterprise
by John Sanda
modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java | 53 +++++++++-
modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageConfigurationException.java | 23 ++++
2 files changed, 71 insertions(+), 5 deletions(-)
New commits:
commit 69656f42348a8ee43e83e77d1d0bc565b290ba8f
Author: John Sanda <jsanda(a)redhat.com>
Date: Wed Jul 31 21:59:34 2013 -0400
adding initial support for setting gossip port for new storage node
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
index 1c96e27..bf25daf 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
@@ -94,6 +94,7 @@ import org.rhq.enterprise.server.resource.group.ResourceGroupManagerLocal;
import org.rhq.enterprise.server.rest.reporting.MeasurementConverter;
import org.rhq.enterprise.server.scheduler.SchedulerLocal;
import org.rhq.enterprise.server.scheduler.jobs.StorageNodeMaintenanceJob;
+import org.rhq.enterprise.server.storage.StorageConfigurationException;
import org.rhq.enterprise.server.util.CriteriaQueryGenerator;
import org.rhq.enterprise.server.util.CriteriaQueryRunner;
import org.rhq.enterprise.server.util.LookupUtil;
@@ -214,7 +215,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
schedule.setOperationName("updateKnownNodes");
Configuration parameters = new Configuration();
- parameters.put(createPropertyListOfAddresses("ipAddresses", combine(getStorageNodes(), newStorageNode)));
+ parameters.put(createPropertyListOfAddresses("ipAddresses", combine(getClusteredStorageNodes(), newStorageNode)));
schedule.setParameters(parameters);
operationManager.scheduleGroupOperation(subjectManager.getOverlord(), schedule);
@@ -451,6 +452,11 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
return query.getResultList();
}
+ private List<StorageNode> getClusteredStorageNodes() {
+ return entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE, StorageNode.class)
+ .setParameter("operationMode", OperationMode.NORMAL).getResultList();
+ }
+
@Override
@RequiredPermission(Permission.MANAGE_SETTINGS)
public PageList<StorageNode> findStorageNodesByCriteria(Subject subject, StorageNodeCriteria criteria) {
@@ -840,7 +846,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
log.info("Preparing to bootstrap " + storageNode + " into cluster...");
}
- List<StorageNode> existingStorageNodes = getStorageNodes();
+ List<StorageNode> existingStorageNodes = getClusteredStorageNodes();
ResourceOperationSchedule schedule = new ResourceOperationSchedule();
schedule.setResource(storageNode.getResource());
@@ -850,15 +856,52 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
Configuration parameters = new Configuration();
parameters.put(new PropertySimple("cqlPort", existingStorageNodes.get(0).getCqlPort()));
- // TODO need to add support for storage_port in cassandra/storage plugins
- parameters.put(new PropertySimple("gossipPort", 7100));
- parameters.put(createPropertyListOfAddresses("storageNodeIPAddresses", getStorageNodes()));
+ parameters.put(new PropertySimple("gossipPort", getGossipPort(storageNode, existingStorageNodes)));
+ parameters.put(createPropertyListOfAddresses("storageNodeIPAddresses", getClusteredStorageNodes()));
schedule.setParameters(parameters);
operationManager.scheduleResourceOperation(subjectManager.getOverlord(), schedule);
}
+ private Integer getGossipPort(StorageNode newStorageNode, List<StorageNode> storageNodes) {
+ if (log.isInfoEnabled()) {
+ log.info("Looking up gossip port for new storage node " + newStorageNode);
+ }
+ try {
+ StorageNode node = null;
+ Configuration resourceConfig = null;
+ for (StorageNode storageNode : storageNodes) {
+ resourceConfig = configurationManager.getLiveResourceConfiguration(subjectManager.getOverlord(),
+ storageNode.getResource().getId(), false);
+ if (resourceConfig == null) {
+ log.warn("Failed to load resource configuration for storage node " + newStorageNode.getResource());
+ } else {
+ node = storageNode;
+ break;
+ }
+ }
+ if (resourceConfig == null) {
+ log.error("Failed to obtain gossip port from existing storage nodes");
+ throw new StorageConfigurationException("Failed to obtain gossip port from existing storage nodes");
+ }
+
+ PropertySimple property = resourceConfig.getSimple("gossipPort");
+ if (property == null) {
+ throw new StorageConfigurationException("The resource configuration for " + node.getResource() +
+ "did not include the required property [gossipPort]");
+ }
+ Integer port = property.getIntegerValue();
+ log.info("Found gossip port set to " + port);
+ return property.getIntegerValue();
+ } catch (Exception e) {
+ if (e instanceof StorageConfigurationException) {
+ throw (StorageConfigurationException) e;
+ }
+ throw new RuntimeException("An error occurred while trying to obtain the gossip port", e);
+ }
+ }
+
@Override
public void runAddNodeMaintenance() {
log.info("Preparing to schedule addNodeMaintenance on the storage cluster...");
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageConfigurationException.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageConfigurationException.java
new file mode 100644
index 0000000..dc616a8
--- /dev/null
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageConfigurationException.java
@@ -0,0 +1,23 @@
+package org.rhq.enterprise.server.storage;
+
+/**
+ * @author John Sanda
+ */
+public class StorageConfigurationException extends RuntimeException {
+
+ public StorageConfigurationException() {
+ super();
+ }
+
+ public StorageConfigurationException(String message) {
+ super(message);
+ }
+
+ public StorageConfigurationException(String message, Throwable cause) {
+ super(message, cause);
+ }
+
+ public StorageConfigurationException(Throwable cause) {
+ super(cause);
+ }
+}
10 years, 9 months