modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java | 15 modules/plugins/rhq-storage/pom.xml | 113 +++++ modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java | 148 ++++++ modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml | 10 modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java | 216 ++++++++++ modules/plugins/rhq-storage/src/test/resources/log4j.properties | 42 + 6 files changed, 534 insertions(+), 10 deletions(-)
New commits: commit b537244bad778a80f6fdf92880abc245eed465ec Author: John Sanda jsanda@redhat.com Date: Tue Jul 23 22:23:06 2013 -0400
initial commit for StorageNodeComponentITest
This is a first stab at some integration tests for the storage plugin.
diff --git a/modules/plugins/rhq-storage/pom.xml b/modules/plugins/rhq-storage/pom.xml index a88f56e..df79e40 100644 --- a/modules/plugins/rhq-storage/pom.xml +++ b/modules/plugins/rhq-storage/pom.xml @@ -10,11 +10,16 @@
<groupId>org.rhq</groupId> <artifactId>rhq-rhqstorage-plugin</artifactId> - <packaging>jar</packaging>
<name>RHQ Storage Plugin</name> <description>A plugin for managing RHQ Storage Nodes</description>
+ <properties> + <pc.basedir>${project.build.directory}/plugin-container</pc.basedir> + <pc.plugins.dir>${pc.basedir}/plugins</pc.plugins.dir> + <pc.lib.dir>${pc.basedir}/lib</pc.lib.dir> + </properties> + <dependencies> <dependency> <groupId>${rhq.groupId}</groupId> @@ -27,7 +32,6 @@ <groupId>${rhq.groupId}</groupId> <artifactId>rhq-cassandra-plugin</artifactId> <version>${project.version}</version> - <!--<scope>provided</scope>--> </dependency>
<dependency> @@ -35,8 +39,113 @@ <artifactId>org-mc4j-ems</artifactId> <scope>provided</scope> </dependency> + + <dependency> + <groupId>${project.groupId}</groupId> + <artifactId>rhq-cassandra-ccm-core</artifactId> + <version>${project.version}</version> + <scope>test</scope> + </dependency> + + <dependency> + <groupId>${project.groupId}</groupId> + <artifactId>rhq-platform-plugin</artifactId> + <version>${project.version}</version> + <scope>test</scope> + </dependency> + + <dependency> + <groupId>${project.groupId}</groupId> + <artifactId>rhq-cassandra-schema</artifactId> + <version>${project.version}</version> + </dependency> </dependencies>
+ <build> + <plugins> + <plugin> + <artifactId>maven-antrun-plugin</artifactId> + <executions> + <execution> + <phase>pre-integration-test</phase> + <configuration> + <target> + <property name="sigar.dir" value="${project.build.directory/sigar}"/> + + <mkdir dir="${pc.basedir}"/> + <mkdir dir="${pc.lib.dir}"/> + <mkdir dir="${pc.plugins.dir}"/> + + <copy file="${org.rhq:rhq-platform-plugin:jar}" todir="${pc.plugins.dir}"/> + <copy file="${org.rhq:rhq-jmx-plugin:jar}" todir="${pc.plugins.dir}"/> + <copy file="${org.rhq:rhq-cassandra-plugin:jar}" todir="${pc.plugins.dir}"/> + <copy file="${project.build.directory}/${project.build.finalName}.jar" todir="${pc.plugins.dir}"/> + + <unzip src="${org.hyperic:sigar-dist:zip}" dest="${sigar.dir}"> + <patternset> + <include name="**/lib/sigar.jar" /> + <include name="**/lib/bcel*.jar" /> + <include name="**/lib/*.so" /> + <include name="**/lib/*.sl" /> + <include name="**/lib/*.dll" /> + <include name="**/lib/*.dylib" /> + </patternset> + </unzip> + <move todir="${pc.lib.dir}" flatten="true"> + <fileset dir="${sigar.dir}" includes="**/lib/*"/> + </move> + <delete dir="${sigar.dir}"/> + </target> + </configuration> + <goals> + <goal>run</goal> + </goals> + </execution> + </executions> + </plugin> + + <plugin> + <artifactId>maven-failsafe-plugin</artifactId> + <version>2.13</version> + <executions> + <execution> + <id>integration-test</id> + <goals> + <goal>integration-test</goal> + </goals> + <configuration> + <includes> + <include>**/*ITest.java</include> + </includes> + <argLine>-Djava.library.path=${pc.lib.dir}</argLine> + <systemPropertyVariables> + <pc.plugins.dir>${pc.plugins.dir}</pc.plugins.dir> + </systemPropertyVariables> + </configuration> + </execution> + <execution> + <id>verify</id> + <goals> + <goal>verify</goal> + </goals> + <configuration> + <testFailureIgnore>false</testFailureIgnore> + </configuration> + </execution> + </executions> + </plugin> + + <plugin> + <artifactId>maven-surefire-plugin</artifactId> + 
<configuration> + <excludes> + <exclude>**/*ITest.java</exclude> + </excludes> + </configuration> + </plugin> + </plugins> + </build> + <profiles> <profile> <id>dev</id> diff --git a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java new file mode 100644 index 0000000..cd9f148 --- /dev/null +++ b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java @@ -0,0 +1,216 @@ +package org.rhq.plugins.storage; + +import static java.util.Arrays.asList; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertNotNull; + +import java.io.File; +import java.net.InetAddress; +import java.util.Set; + +import com.google.common.collect.Sets; + +import org.testng.annotations.AfterSuite; +import org.testng.annotations.BeforeSuite; +import org.testng.annotations.Test; + +import org.rhq.cassandra.CassandraClusterManager; +import org.rhq.cassandra.ClusterInitService; +import org.rhq.cassandra.Deployer; +import org.rhq.cassandra.DeploymentOptions; +import org.rhq.cassandra.DeploymentOptionsFactory; +import org.rhq.cassandra.schema.SchemaManager; +import org.rhq.core.clientapi.server.discovery.InventoryReport; +import org.rhq.core.domain.cloud.StorageNode; +import org.rhq.core.domain.configuration.Configuration; +import org.rhq.core.domain.measurement.Availability; +import org.rhq.core.domain.measurement.AvailabilityType; +import org.rhq.core.domain.resource.Resource; +import org.rhq.core.domain.resource.ResourceType; +import org.rhq.core.pc.PluginContainer; +import org.rhq.core.pc.PluginContainerConfiguration; +import org.rhq.core.pc.inventory.InventoryManager; +import org.rhq.core.pc.operation.OperationContextImpl; +import org.rhq.core.pc.operation.OperationManager; +import org.rhq.core.pc.operation.OperationServicesAdapter; +import 
org.rhq.core.pc.plugin.FileSystemPluginFinder; +import org.rhq.core.pluginapi.operation.OperationServicesResult; +import org.rhq.core.pluginapi.operation.OperationServicesResultCode; +import org.rhq.core.pluginapi.util.ProcessExecutionUtility; +import org.rhq.core.system.ProcessExecution; +import org.rhq.core.system.ProcessExecutionResults; +import org.rhq.core.system.SystemInfo; +import org.rhq.core.system.SystemInfoFactory; + +/** + * @author John Sanda + */ +public class StorageNodeComponentITest { + + private File basedir; + + private Resource storageNode; + + @BeforeSuite + public void deployStorageNodeAndPluginContainer() throws Exception { + basedir = new File("target", "rhq-storage"); + + deployStorageNode(); + + initPluginContainer(); + } + + private void deployStorageNode() throws Exception { + DeploymentOptionsFactory factory = new DeploymentOptionsFactory(); + DeploymentOptions deploymentOptions = factory.newDeploymentOptions(); + String address = "127.0.0.1"; + + deploymentOptions.setSeeds(address); + deploymentOptions.setListenAddress(address); + deploymentOptions.setRpcAddress(address); + deploymentOptions.setBasedir(basedir.getAbsolutePath()); + deploymentOptions.setCommitLogDir(new File(basedir, "commit_log").getAbsolutePath()); + deploymentOptions.setDataDir(new File(basedir, "data").getAbsolutePath()); + deploymentOptions.setSavedCachesDir(new File(basedir, "saved_caches").getAbsolutePath()); + deploymentOptions.setCommitLogDir(new File(basedir, "logs").getAbsolutePath()); + deploymentOptions.setLoggingLevel("DEBUG"); + deploymentOptions.setNativeTransportPort(9142); + deploymentOptions.setJmxPort(7399); + deploymentOptions.setHeapSize("256M"); + deploymentOptions.setHeapNewSize("64M"); + + deploymentOptions.load(); + + Deployer deployer = new Deployer(); + deployer.setDeploymentOptions(deploymentOptions); + + deployer.unzipDistro(); + deployer.applyConfigChanges(); + deployer.updateFilePerms(); + 
deployer.updateStorageAuthConf(Sets.newHashSet(InetAddress.getByName(address))); + + File binDir = new File(basedir, "bin"); + SystemInfo systemInfo = SystemInfoFactory.createSystemInfo(); + + File startScript = new File(binDir, "cassandra"); + ProcessExecution startScriptExe = ProcessExecutionUtility.createProcessExecution(startScript); + + startScriptExe.addArguments(asList("-p", "cassandra.pid")); + startScriptExe.setCaptureOutput(true); + ProcessExecutionResults results = systemInfo.executeProcess(startScriptExe); + + assertEquals(results.getExitCode(), (Integer) 0, "Cassandra failed to start: " + results.getCapturedOutput()); + + StorageNode storageNode = new StorageNode(); + storageNode.parseNodeInformation("127.0.0.1|7399|9142"); + + ClusterInitService clusterInitService = new ClusterInitService(); + clusterInitService.waitForClusterToStart(asList(storageNode)); + + SchemaManager schemaManager = new SchemaManager("rhqadmin", "rhqadmin", "127.0.0.1|7399|9142"); + schemaManager.install(); + schemaManager.updateTopology(true); + } + + private void initPluginContainer() { + PluginContainerConfiguration pcConfig = new PluginContainerConfiguration(); + File pluginsDir = new File(System.getProperty("pc.plugins.dir")); + pcConfig.setPluginDirectory(pluginsDir); + pcConfig.setPluginFinder(new FileSystemPluginFinder(pluginsDir)); + + pcConfig.setInsideAgent(false); + PluginContainer.getInstance().setConfiguration(pcConfig); + PluginContainer.getInstance().initialize(); + } + + @AfterSuite + public void ShutdownPluginContainerAndStorageNode() throws Exception { + PluginContainer.getInstance().shutdown(); + shutdownStorageNodeIfNecessary(); + } + + private void shutdownStorageNodeIfNecessary() throws Exception { + File binDir = new File(basedir, "bin"); + File pidFile = new File(binDir, "cassandra.pid"); + + if (pidFile.exists()) { + CassandraClusterManager ccm = new CassandraClusterManager(); + ccm.killNode(basedir); + } + } + + @Test + public void 
discoverStorageNode() { + InventoryManager inventoryManager = PluginContainer.getInstance().getInventoryManager(); + InventoryReport inventoryReport = inventoryManager.executeServerScanImmediately(); + + if (inventoryReport.getAddedRoots().isEmpty()) { + // could be empty if the storage node is already in inventory from + // a prior discovery scan. + Resource platform = inventoryManager.getPlatform(); + storageNode = findCassandraNode(platform.getChildResources()); + } else { + storageNode = findCassandraNode(inventoryReport.getAddedRoots()); + } + + assertNotNull(storageNode, "Failed to discover Storage Node instance"); + assertNodeIsUp("Expected " + storageNode + " to be UP after discovery"); + } + + @Test(dependsOnMethods = "discoverStorageNode") + public void shutdownStorageNode() throws Exception { + OperationManager operationManager = PluginContainer.getInstance().getOperationManager(); + OperationServicesAdapter operationsService = new OperationServicesAdapter(operationManager); + + long timeout = 1000 * 60; + OperationContextImpl operationContext = new OperationContextImpl(storageNode.getId()); + OperationServicesResult result = operationsService.invokeOperation(operationContext, "shutdown", + new Configuration(), timeout); + + assertEquals(result.getResultCode(), OperationServicesResultCode.SUCCESS, "The shutdown operation failed"); + // TODO why is this failing? 
+ assertNodeIsDown("Expected " + storageNode + " to be DOWN after shutting it down"); + } + + private void assertNodeIsUp(String msg) { + executeAvailabilityScan(); + + Availability availability = getAvailability(); + + assertNotNull(availability, "Unable to determine availability for " + storageNode); + assertEquals(availability.getAvailabilityType(), AvailabilityType.UP, msg); + } + + private void assertNodeIsDown(String msg) { + executeAvailabilityScan(); + + Availability availability = getAvailability(); + + assertNotNull(availability, "Unable to determine availability for " + storageNode); + assertEquals(availability.getAvailabilityType(), AvailabilityType.DOWN, msg); + } + + private Availability getAvailability() { + InventoryManager inventoryManager = PluginContainer.getInstance().getInventoryManager(); + return inventoryManager.getAvailabilityIfKnown(storageNode); + } + + private void executeAvailabilityScan() { + InventoryManager inventoryManager = PluginContainer.getInstance().getInventoryManager(); + inventoryManager.executeAvailabilityScanImmediately(false, true); + } + + private Resource findCassandraNode(Set<Resource> resources) { + for (Resource resource : resources) { + if (isCassandraNode(resource.getResourceType())) { + return resource; + } + } + return null; + } + + private boolean isCassandraNode(ResourceType type) { + return type.getPlugin().equals("RHQStorage") && type.getName().equals("RHQ Storage Node"); + } + +} diff --git a/modules/plugins/rhq-storage/src/test/resources/log4j.properties b/modules/plugins/rhq-storage/src/test/resources/log4j.properties new file mode 100644 index 0000000..67db049 --- /dev/null +++ b/modules/plugins/rhq-storage/src/test/resources/log4j.properties @@ -0,0 +1,42 @@ +# +# /* +# * RHQ Management Platform +# * Copyright (C) 2005-2012 Red Hat, Inc. +# * All rights reserved. 
+# * +# * This program is free software; you can redistribute it and/or modify +# * it under the terms of the GNU General Public License, version 2, as +# * published by the Free Software Foundation, and/or the GNU Lesser +# * General Public License, version 2.1, also as published by the Free +# * Software Foundation. +# * +# * This program is distributed in the hope that it will be useful, +# * but WITHOUT ANY WARRANTY; without even the implied warranty of +# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# * GNU General Public License and the GNU Lesser General Public License +# * for more details. +# * +# * You should have received a copy of the GNU General Public License +# * and the GNU Lesser General Public License along with this program; +# * if not, write to the Free Software Foundation, Inc., +# * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +# */ +# + +log4j.rootCategory=WARN, FILE, CONSOLE + +log4j.appender.FILE=org.apache.log4j.DailyRollingFileAppender +log4j.appender.FILE.DatePattern='.'yyyy-MM-dd +log4j.appender.FILE.File=./target/test.log +log4j.appender.FILE.layout=org.apache.log4j.PatternLayout +log4j.appender.FILE.layout.ConversionPattern=%d{ABSOLUTE} %-5p %c %m%n +#log4j.appender.A1.layout.ConversionPattern=%d [%t] %-5p %c - %m%n +log4j.appender.FILE.Append=false + +log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender +log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout +#log4j.appender.CONSOLE.layout.ConversionPattern=%5p %d{HH:mm:ss,SSS} %m%n +log4j.appender.CONSOLE.layout.ConversionPattern=%d{ABSOLUTE} %-5p %c %m%n + +log4j.logger.org.rhq=DEBUG +log4j.logger.com.datastax=DEBUG
commit 83e5b228871c9a8352e98a12e0db76f8f4ea982e Author: John Sanda jsanda@redhat.com Date: Tue Jul 23 22:16:41 2013 -0400
first stab at prepareForBootstrap operation (which is currently broken)
This is clearly broken from some manual testing I did. Given that the implementation is a bit sloppy at the moment, this is a good time to get some automated tests in place. The operation will perform the following steps in the order specified:
1) shut down the storage node 2) update cassandra.yaml 3) update rhq-storage-auth.conf 4) restart the node
diff --git a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java index 0037bfe..f76da22 100644 --- a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java +++ b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java @@ -111,7 +111,7 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent clusterBuilder = clusterBuilder.withCredentials(username, password); }
- this.cassandraSession = clusterBuilder.build().connect(clusterName); +// this.cassandraSession = clusterBuilder.build().connect(clusterName); } catch (Exception e) { LOG.error("Connect to Cassandra " + host + ":" + nativePort, e); throw e; @@ -196,7 +196,17 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent operation = storageService.getOperation("drain", emptyParams); operation.invoke((Object[]) emptyParams);
- ProcessInfo process = context.getNativeProcess(); + return stopNode(); + } + + protected OperationResult stopNode() { + ProcessInfo process = getResourceContext().getNativeProcess(); + + if (processInfo == null) { + LOG.warn("Failed to obtain process info. It appears Cassandra is already shutdown."); + return new OperationResult("Failed to obtain process info. It appears Cassandra is already shutdown."); + } + long pid = process.getPid(); try { process.kill("KILL"); @@ -209,6 +219,7 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent } }
+ protected OperationResult startNode() { ResourceContext<?> context = getResourceContext(); Configuration pluginConfig = context.getPluginConfiguration(); diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java index 3b0aa5b..d9b35b9 100644 --- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java +++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java @@ -26,11 +26,15 @@ package org.rhq.plugins.storage;
import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; import java.io.FileWriter; import java.io.IOException; import java.io.StringReader; +import java.util.ArrayList; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Set;
import org.apache.commons.logging.Log; @@ -39,6 +43,8 @@ import org.mc4j.ems.connection.EmsConnection; import org.mc4j.ems.connection.bean.EmsBean; import org.mc4j.ems.connection.bean.attribute.EmsAttribute; import org.mc4j.ems.connection.bean.operation.EmsOperation; +import org.yaml.snakeyaml.DumperOptions; +import org.yaml.snakeyaml.Yaml;
import org.rhq.core.domain.configuration.Configuration; import org.rhq.core.domain.configuration.ConfigurationUpdateStatus; @@ -48,6 +54,7 @@ import org.rhq.core.domain.configuration.PropertyMap; import org.rhq.core.domain.configuration.PropertySimple; import org.rhq.core.pluginapi.configuration.ConfigurationFacet; import org.rhq.core.pluginapi.configuration.ConfigurationUpdateReport; +import org.rhq.core.pluginapi.inventory.ResourceContext; import org.rhq.core.pluginapi.operation.OperationFacet; import org.rhq.core.pluginapi.operation.OperationResult; import org.rhq.core.util.StringUtil; @@ -96,6 +103,8 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper return updateConfiguration(parameters); } else if (name.equals("updateKnownNodes")) { return updateKnownNodes(parameters); + } else if (name.equals("prepareForBootstrap")) { + return prepareForBootstrap(parameters); } else { return super.invokeOperation(name, parameters); } @@ -132,6 +141,18 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper ipAddresses.add(propertySimple.getStringValue()); }
+ if (updateAuthFile(result, ipAddresses)) return result; + + EmsBean authBean = getEmsConnection().getBean("org.rhq.cassandra.auth:type=RhqInternodeAuthenticator"); + EmsOperation emsOperation = authBean.getOperation("reloadConfiguration"); + emsOperation.invoke(); + + result.setSimpleResult("Successfully updated the set of known nodes."); + + return result; + } + + private boolean updateAuthFile(OperationResult result, Set<String> ipAddresses) { log.info("Updating known nodes to " + ipAddresses);
File confDir = new File(getBasedir(), "conf"); @@ -150,7 +171,7 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper log.error(msg); result.setErrorMessage(msg);
- return result; + return true; } }
@@ -161,7 +182,7 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper "to unexpected error"; log.error(msg, e); result.setErrorMessage(msg + ": " + ThrowableUtil.getRootMessage(e)); - return result; + return true; }
try { @@ -176,18 +197,127 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper } result.setErrorMessage("There was an unexpected error while updating " + authFile + ". Make sure that " + "it matches " + authBackupFile + " and then reschedule the operation."); + return true; + } + return false; + } + + private OperationResult prepareForBootstrap(Configuration params) { + log.info("Preparing " + this + " for bootstrap..."); + + ResourceContext context = getResourceContext(); + OperationResult result = new OperationResult(); + + log.info("Stopping storage node"); + OperationResult stopNodeResult = stopNode(); + if (stopNodeResult.getErrorMessage() != null) { + log.error("Failed to stop storage node " + this + " Cannot prepare the node for bootstrap which means " + + "that the storage node cannot join the cluster. Make sure the storage node is not running and retry " + + "the operation"); + result.setErrorMessage("Failed to stop storage node. Cannot prepare the node for bootstrap which means " + + "that it cannot join the cluster. Make sure that the node is not running and retry the operation. " + + "Stopping the storage node failed with this error: " + stopNodeResult.getErrorMessage()); return result; }
- EmsBean authBean = getEmsConnection().getBean("org.rhq.cassandra.auth:type=RhqInternodeAuthenticator"); - EmsOperation emsOperation = authBean.getOperation("reloadConfiguration"); - emsOperation.invoke(); + Configuration pluginConfig = context.getPluginConfiguration(); + String yamlProp = pluginConfig.getSimpleValue("yamlConfiguration"); + File yamlFile = new File(yamlProp);
- result.setSimpleResult("Successfully updated the set of known nodes."); + DumperOptions options = new DumperOptions(); + options.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK); + Yaml yaml = new Yaml(options); + + Map yamlConfig = null; + try { + yamlConfig = (Map) yaml.load(new FileInputStream(yamlFile)); + } catch (FileNotFoundException e) { + log.error("Failed to load " + yamlFile, e); + log.error("Cannot prepare " + this + " for bootstrap. " + yamlFile + " must exist in order to make the " + + "necessary configuration changes."); + result.setErrorMessage("Cannot prepare storage node for bootstrap. It appears that " + yamlFile + + " does not exist. Make sure that it exists so that the necessary configuration changes can be made."); + + return result; + } + + purgeDir(getCommitLogDir(yamlConfig)); + for (File dataDir : getDataDirs(yamlConfig)) { + purgeDir(dataDir); + } + purgeDir(getSavedCachesDir(yamlConfig)); + + log.info("Updating cluster settings"); + + String address = pluginConfig.getSimpleValue("host"); + List<String> seeds = getAddresses(params.getList("storageNodeIPAddresses")); + // Make sure this node's address is not in the list; otherwise, it + // won't bootstrap properly. 
+ seeds.remove(address); + try { + updateSeedsList(seeds); + } catch (IOException e) { + log.error("Failed to update seeds property in " + yamlFile, e); + result.setErrorMessage("Failed to prepared node for bootstrap due to unexpected error that occurred " + + "while updating seeds property in " + yamlFile + ":\n" + ThrowableUtil.getAllMessages(e)); + return result; + } + + if (updateAuthFile(result, new HashSet<String>(seeds))) { + return result; + } + + int cqlPort = Integer.parseInt(params.getSimpleValue("cqlPort")); + int gossipPort = Integer.parseInt(params.getSimpleValue("gossipPort")); + + yamlConfig.put("native_transport_port", cqlPort); + yamlConfig.put("storage_port", gossipPort); + + try { + yaml.dump(yamlConfig, new FileWriter(yamlFile)); + } catch (IOException e) { + log.error("Could not update cluster settings in " + yamlFile, e); + result.setErrorMessage("Could not update cluster settings in " + yamlFile + ":\n" + + ThrowableUtil.getAllMessages(e)); + return result; + } + + log.info(this + " is ready to be bootstrap. Restarting storage node..."); + OperationResult startResult = startNode(); + if (startResult.getErrorMessage() != null) { + log.error("Failed to restart storage node:\n" + startResult.getErrorMessage()); + result.setErrorMessage("Failed to restart storage node:\n" + startResult.getErrorMessage()); + } else { + result.setSimpleResult("The storage node was succesfully updated is now bootstrapping into the cluster."); + }
return result; }
+ private void purgeDir(File dir) { + log.info("Purging " + dir); + FileUtil.purge(dir, true); + } + + private File getCommitLogDir(Map yamlConfig) { + return new File((String) yamlConfig.get("commitlog_directory")); + } + + private List<File> getDataDirs(Map yamlConfig) { + List<File> dirs = new ArrayList<File>(); + List<String> dirNames = (List<String>) yamlConfig.get("data_file_directories"); + + for (String dirName : dirNames) { + dirs.add(new File(dirName)); + } + + return dirs; + } + + private File getSavedCachesDir(Map yamlConfig) { + return new File((String) yamlConfig.get("saved_caches_directory")); + } + private OperationResult nodeAdded(Configuration params) { boolean runRepair = params.getSimple("runRepair").getBooleanValue(); boolean updateSeedsList = params.getSimple("updateSeedsList").getBooleanValue(); @@ -405,4 +535,10 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper boolean succeeded; String details; } + + @Override + public String toString() { + return StorageNodeComponent.class.getSimpleName() + "[resourceKey: " + getResourceContext().getResourceKey() + + "]"; + } } diff --git a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml index 1e39d6c..cd84de6 100644 --- a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml +++ b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml @@ -101,6 +101,16 @@ </parameters> </operation>
+ <operation name="prepareForBootstrap"> + <parameters> + <c:simple-property name="cqlPort" type="integer" displayName="CQL Port"/> + <c:simple-property name="gossipPort" type="integer"/> + <c:list-property name="storageNodeIPAddresses" displayName="Storage Node IP Addresses"> + <c:simple-property name="storageNodeIPAddress"/> + </c:list-property> + </parameters> + </operation> + <operation name="prepareForUpgrade" description="Prepares the storage node for upgrade (this operation consists of following steps: 1) turning off the RPC server, 2) turning off the gossiper, 3) taking the snapshot (backuping the data), 4) invoking the drain operation"> <parameters> <c:simple-property name="snapshotName" required="false" type="string" displayName="Snapshot Name"
rhq-commits@lists.fedorahosted.org