modules/plugins/hadoop/src/main/java/org/rhq/plugins/hadoop/HadoopServiceComponent.java | 66 +++++++++-
modules/plugins/hadoop/src/main/resources/META-INF/rhq-plugin.xml | 13 +
2 files changed, 78 insertions(+), 1 deletion(-)
New commits:
commit d0e66482325770f111830e8f40fbb4cb04d0e339
Author: Lukas Krejci <lkrejci@redhat.com>
Date: Wed Aug 1 18:50:56 2012 +0200
some more config options + initial support for log polling
diff --git a/modules/plugins/hadoop/src/main/java/org/rhq/plugins/hadoop/HadoopServiceComponent.java b/modules/plugins/hadoop/src/main/java/org/rhq/plugins/hadoop/HadoopServiceComponent.java
index a84798b..16c38a8 100644
--- a/modules/plugins/hadoop/src/main/java/org/rhq/plugins/hadoop/HadoopServiceComponent.java
+++ b/modules/plugins/hadoop/src/main/java/org/rhq/plugins/hadoop/HadoopServiceComponent.java
@@ -19,6 +19,7 @@
package org.rhq.plugins.hadoop;
+import java.io.File;
import java.util.Set;
import org.apache.commons.logging.Log;
@@ -36,6 +37,9 @@ import org.rhq.core.domain.measurement.MeasurementReport;
import org.rhq.core.domain.measurement.MeasurementScheduleRequest;
import org.rhq.core.pluginapi.configuration.ConfigurationFacet;
import org.rhq.core.pluginapi.configuration.ConfigurationUpdateReport;
+import org.rhq.core.pluginapi.event.EventContext;
+import org.rhq.core.pluginapi.event.log.Log4JLogEntryProcessor;
+import org.rhq.core.pluginapi.event.log.LogFileEventPoller;
import org.rhq.core.pluginapi.inventory.ResourceComponent;
import org.rhq.core.pluginapi.inventory.ResourceContext;
import org.rhq.core.pluginapi.measurement.MeasurementFacet;
@@ -48,7 +52,10 @@ public class HadoopServiceComponent extends JMXServerComponent<ResourceComponent
JMXComponent<ResourceComponent<?>>, MeasurementFacet, OperationFacet, ConfigurationFacet {
private static final Log LOG = LogFactory.getLog(HadoopServiceComponent.class);
-
+
+ private static final String LOG_EVENT_TYPE = "logEntry";
+ private static final String LOG_POLLING_INTERVAL_PROPERTY = "logPollingInterval";
+
private HadoopServerConfigurationDelegate configurationDelegate;
private HadoopOperationsDelegate operationsDelegate;
@@ -59,6 +66,22 @@ public class HadoopServiceComponent extends JMXServerComponent<ResourceComponent
super.start(context);
configurationDelegate = new HadoopServerConfigurationDelegate(context);
this.operationsDelegate = new HadoopOperationsDelegate(context);
+
+ EventContext events = context.getEventContext();
+ if (events != null) {
+ File logFile = determineLogFile();
+ int interval = Integer.parseInt(context.getPluginConfiguration().getSimpleValue(LOG_POLLING_INTERVAL_PROPERTY, "60"));
+ events.registerEventPoller(new LogFileEventPoller(events, LOG_EVENT_TYPE, logFile, new Log4JLogEntryProcessor(LOG_EVENT_TYPE, logFile)), interval);
+ }
+ }
+
+ @Override
+ public void stop() {
+ EventContext events = getResourceContext().getEventContext();
+ if (events != null) {
+ events.unregisterEventPoller(LOG_EVENT_TYPE);
+ }
+ super.stop();
}
/**
@@ -142,4 +165,45 @@ public class HadoopServiceComponent extends JMXServerComponent<ResourceComponent
HadoopSupportedOperations operation = HadoopSupportedOperations.valueOf(name);
return operationsDelegate.invoke(operation, params);
}
+
+ private File determineLogFile() {
+ String username = getResourceContext().getNativeProcess().getCredentialsName().getUser();
+ String hostname = getResourceContext().getSystemInformation().getHostname();
+
+ String serverType = getServerType();
+
+ String name = "hadoop-" + username + "-" + serverType + "-" + hostname + ".log";
+
+ return new File(new File(getHomeDir(), "logs"), name);
+ }
+
+ private String getServerType() {
+ String mainClass = getResourceContext().getPluginConfiguration().getSimpleValue("_mainClass");
+ int dot = mainClass.lastIndexOf('.');
+ String className = mainClass.substring(dot + 1);
+
+ return className.toLowerCase();
+ }
+
+ private File getHomeDir() {
+ File homeDir = new File(getResourceContext().getPluginConfiguration().getSimpleValue(HadoopServiceDiscovery.HOME_DIR_PROPERTY));
+
+ if (!homeDir.exists()) {
+ throw new IllegalArgumentException("The configured home directory of this Hadoop instance ("
+ + homeDir.getAbsolutePath() + ") no longer exists.");
+ }
+
+ if (!homeDir.isDirectory()) {
+ throw new IllegalArgumentException("The configured home directory of this Hadoop instance ("
+ + homeDir.getAbsolutePath() + ") is not a directory.");
+ }
+
+ if (!homeDir.canRead()) {
+ throw new IllegalArgumentException("The configured home directory of this Hadoop instance ("
+ + homeDir.getAbsolutePath() + ") is not readable.");
+ }
+
+ return homeDir;
+ }
}
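A note on the start() logic above: Integer.parseInt throws a NumberFormatException if a user sets logPollingInterval to a non-numeric value, which would fail the whole component start. A minimal defensive sketch, assuming the class context from the diff above (Configuration is org.rhq.core.domain.configuration.Configuration, as returned by context.getPluginConfiguration()); the helper name getLogPollingInterval is hypothetical and not part of this commit:

    // Hypothetical helper (not part of this commit): falls back to the
    // 60-second default instead of failing component start when the
    // logPollingInterval plugin property is not a valid integer.
    private int getLogPollingInterval(Configuration pluginConfig) {
        String raw = pluginConfig.getSimpleValue(LOG_POLLING_INTERVAL_PROPERTY, "60");
        try {
            return Integer.parseInt(raw);
        } catch (NumberFormatException e) {
            LOG.warn("Invalid " + LOG_POLLING_INTERVAL_PROPERTY + " value [" + raw + "], defaulting to 60 seconds.");
            return 60;
        }
    }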
diff --git a/modules/plugins/hadoop/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/hadoop/src/main/resources/META-INF/rhq-plugin.xml
index 2f07b20..36bc092 100644
--- a/modules/plugins/hadoop/src/main/resources/META-INF/rhq-plugin.xml
+++ b/modules/plugins/hadoop/src/main/resources/META-INF/rhq-plugin.xml
@@ -10,6 +10,7 @@
<c:simple-property name="hadoop.home.dir" displayName="Home
Directory"/>
<c:simple-property name="_mainClass" displayName="Main
Class" readOnly="true"
default="org.apache.hadoop.hdfs.server.namenode.NameNode"/>
+ <c:simple-property name="logPollingInterval" default="60"
description="The interval for log file polling in seconds."/>
</plugin-configuration>
<process-scan name="NameNode"
query="process|basename|match=^java.*,arg|-Dproc_namenode|match=.*"/>
@@ -44,10 +45,14 @@
<metric property="Hadoop:service=NameNode,name=FSNamesystemState:PendingReplicationBlocks" displayName="Pending Replication Blocks" units="none"/>
+ <event name="logEntry" description="an entry in a log
file"/>
+
<resource-configuration>
<c:simple-property name="conf/core-site.xml:fs.default.name"
displayName="Namenode URI"/>
<c:simple-property name="conf/hdfs-site.xml:dfs.name.dir"
displayName="Local Namespace and Logs Storage Directory"
description="Path on the local filesystem where the NameNode stores the
namespace and transactions logs persistently."/>
+ <c:simple-property name="conf/hdfs-site.xml:dfs.block.size"
displayName="HDFS Block Size"
+ description="Path on the local filesystem where the NameNode stores the
namespace and transactions logs persistently. The value is in bytes."/>
</resource-configuration>
</server>
@@ -122,6 +127,14 @@
<metric property="Hadoop:service=JobTracker,name=JobTrackerMetrics:trackers_graylisted" displayName="Graylisted Nodes"/>
<metric property="Hadoop:service=JobTracker,name=JobTrackerMetrics:trackers_decommissioned" displayName="Excluded Nodes"/>
+ <resource-configuration>
+ <c:simple-property name="conf/mapred-site.xml:mapred.job.tracker"
displayName="Host And Port" description="Host or IP and port of JobTracker.
host:port pair."/>
+ <c:simple-property name="conf/mapred-site.xml:mapred.system.dir"
displayName="System Files Location" description="Path on the HDFS where
where the MapReduce framework stores system files e.g. /hadoop/mapred/system/. This is in
the default filesystem (HDFS) and must be accessible from both the server and client
machines."/>
+ <c:simple-property name="conf/mapred-site.xml:mapred.local.dir"
displayName="Data Files Location" description="Comma-separated list of
paths on the local filesystem where temporary MapReduce data is written. Multiple paths
help spread disk i/o."/>
+ <c:simple-property
name="conf/mapred-site.xml:mapred.tasktracker.map.tasks.maximum"
displayName="Maximum Map Tasks" description="The maximum number of Map
tasks, which are run simultaneously on a given TaskTracker, individually. Defaults to 2 (2
maps and 2 reduces), but vary it depending on your hardware."/>
+ <c:simple-property
name="conf/mapred-site.xml:mapred.tasktracker.reduce.tasks.maximum"
displayName="Maximum Reduce Tasks" description="The maximum number of
Reduce tasks, which are run simultaneously on a given TaskTracker, individually. Defaults
to 2 (2 maps and 2 reduces), but vary it depending on your hardware."/>
+ <c:simple-property name="conf/mapred-site.xml:mapred.queue.names"
displayName="Job Queues" description="Comma separated list of queues to
which jobs can be submitted. The MapReduce system always supports atleast one queue with
the name as default. Hence, this parameter's value should always contain the string
default. Some job schedulers supported in Hadoop, like the Capacity Scheduler, support
multiple queues. If such a scheduler is being used, the list of configured queue names
must be specified here. Once queues are defined, users can submit jobs to a queue using
the property name mapred.job.queue.name in the job configuration. There could be a
separate configuration file for configuring properties of these queues that is managed by
the scheduler. Refer to the documentation of the scheduler for information on the
same."/>
+ </resource-configuration>
</server>
<!-- TaskTracker (http://wiki.apache.org/hadoop/TaskTracker) -->
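For reference, the resource-configuration property names above encode the target configuration file and the key within it as a file:key pair. A hypothetical hdfs-site.xml excerpt that the conf/hdfs-site.xml:dfs.block.size property would correspond to; the 134217728 (128 MB) value is only an illustration, not a plugin default:

    <!-- hypothetical hdfs-site.xml entry; 134217728 bytes = 128 MB -->
    <property>
      <name>dfs.block.size</name>
      <value>134217728</value>
    </property>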